class <lambda>(torch.nn.Module):
    def forward(self, arg0_1: "f32[1, 768, 68, 68]", arg1_1: "f32[256, 768, 3, 3]", arg2_1: "f32[256]", arg3_1: "f32[256]", arg4_1: "f32[256]", arg5_1: "f32[256, 256, 3, 3]", arg6_1: "f32[256]", arg7_1: "f32[256]", arg8_1: "f32[256]", arg9_1: "f32[256, 768, 3, 3]", arg10_1: "f32[256]", arg11_1: "f32[256]", arg12_1: "f32[256]", arg13_1: "f32[256, 256, 3, 3]", arg14_1: "f32[256]", arg15_1: "f32[256]", arg16_1: "f32[256]", arg17_1: "f32[256, 256, 3, 3]", arg18_1: "f32[256]", arg19_1: "f32[256]", arg20_1: "f32[256]"):
        # File: /workspace/networks/layers/basic.py:69 in forward, code: r = self.conv1(F.relu(x))
        relu: "f32[1, 768, 68, 68]" = torch.ops.aten.relu.default(arg0_1)

        # File: /opt/conda/lib/python3.11/site-packages/torch/nn/modules/conv.py:453 in _conv_forward, code: return F.conv2d(input, weight, bias, self.stride,
        convert_element_type: "f16[256]" = torch.ops.prims.convert_element_type.default(arg2_1, torch.float16); arg2_1 = None
        convert_element_type_1: "f16[256, 768, 3, 3]" = torch.ops.prims.convert_element_type.default(arg1_1, torch.float16); arg1_1 = None
        convert_element_type_2: "f16[1, 768, 68, 68]" = torch.ops.prims.convert_element_type.default(relu, torch.float16); relu = None
        convolution: "f16[1, 256, 68, 68]" = torch.ops.aten.convolution.default(convert_element_type_2, convert_element_type_1, convert_element_type, [1, 1], [1, 1], [1, 1], False, [0, 0], 1); convert_element_type_2 = convert_element_type_1 = convert_element_type = None

        # File: /opt/conda/lib/python3.11/site-packages/torch/nn/modules/normalization.py:287 in forward, code: return F.group_norm(
        convert_element_type_3: "f32[1, 256, 68, 68]" = torch.ops.prims.convert_element_type.default(convolution, torch.float32); convolution = None
        view: "f32[1, 8, 32, 4624]" = torch.ops.aten.view.default(convert_element_type_3, [1, 8, 32, 4624]); convert_element_type_3 = None
        var_mean = torch.ops.aten.var_mean.correction(view, [2, 3], correction = 0, keepdim = True)
        getitem: "f32[1, 8, 1, 1]" = var_mean[0]
        getitem_1: "f32[1, 8, 1, 1]" = var_mean[1]; var_mean = None
        add: "f32[1, 8, 1, 1]" = torch.ops.aten.add.Tensor(getitem, 1e-05); getitem = None
        rsqrt: "f32[1, 8, 1, 1]" = torch.ops.aten.rsqrt.default(add); add = None
        sub: "f32[1, 8, 32, 4624]" = torch.ops.aten.sub.Tensor(view, getitem_1); view = getitem_1 = None
        mul: "f32[1, 8, 32, 4624]" = torch.ops.aten.mul.Tensor(sub, rsqrt); sub = rsqrt = None
        view_1: "f32[1, 256, 68, 68]" = torch.ops.aten.view.default(mul, [1, 256, 68, 68]); mul = None
        unsqueeze: "f32[1, 256]" = torch.ops.aten.unsqueeze.default(arg4_1, 0); arg4_1 = None
        unsqueeze_1: "f32[1, 256, 1]" = torch.ops.aten.unsqueeze.default(unsqueeze, 2); unsqueeze = None
        unsqueeze_2: "f32[1, 256, 1, 1]" = torch.ops.aten.unsqueeze.default(unsqueeze_1, 3); unsqueeze_1 = None
        unsqueeze_3: "f32[1, 256]" = torch.ops.aten.unsqueeze.default(arg3_1, 0); arg3_1 = None
        unsqueeze_4: "f32[1, 256, 1]" = torch.ops.aten.unsqueeze.default(unsqueeze_3, 2); unsqueeze_3 = None
        unsqueeze_5: "f32[1, 256, 1, 1]" = torch.ops.aten.unsqueeze.default(unsqueeze_4, 3); unsqueeze_4 = None
        mul_1: "f32[1, 256, 68, 68]" = torch.ops.aten.mul.Tensor(view_1, unsqueeze_5); view_1 = unsqueeze_5 = None
        add_1: "f32[1, 256, 68, 68]" = torch.ops.aten.add.Tensor(mul_1, unsqueeze_2); mul_1 = unsqueeze_2 = None

        # File: /workspace/networks/layers/basic.py:72 in forward, code: r = self.conv2(F.relu(r))
        relu_1: "f32[1, 256, 68, 68]" = torch.ops.aten.relu.default(add_1); add_1 = None

        # File: /opt/conda/lib/python3.11/site-packages/torch/nn/modules/conv.py:453 in _conv_forward, code: return F.conv2d(input, weight, bias, self.stride,
        convert_element_type_4: "f16[256]" = torch.ops.prims.convert_element_type.default(arg6_1, torch.float16); arg6_1 = None
        convert_element_type_5: "f16[256, 256, 3, 3]" = torch.ops.prims.convert_element_type.default(arg5_1, torch.float16); arg5_1 = None
        convert_element_type_6: "f16[1, 256, 68, 68]" = torch.ops.prims.convert_element_type.default(relu_1, torch.float16); relu_1 = None
        convolution_1: "f16[1, 256, 68, 68]" = torch.ops.aten.convolution.default(convert_element_type_6, convert_element_type_5, convert_element_type_4, [1, 1], [1, 1], [1, 1], False, [0, 0], 1); convert_element_type_6 = convert_element_type_5 = convert_element_type_4 = None

        # File: /opt/conda/lib/python3.11/site-packages/torch/nn/modules/normalization.py:287 in forward, code: return F.group_norm(
        convert_element_type_7: "f32[1, 256, 68, 68]" = torch.ops.prims.convert_element_type.default(convolution_1, torch.float32); convolution_1 = None
        view_2: "f32[1, 8, 32, 4624]" = torch.ops.aten.view.default(convert_element_type_7, [1, 8, 32, 4624]); convert_element_type_7 = None
        var_mean_1 = torch.ops.aten.var_mean.correction(view_2, [2, 3], correction = 0, keepdim = True)
        getitem_2: "f32[1, 8, 1, 1]" = var_mean_1[0]
        getitem_3: "f32[1, 8, 1, 1]" = var_mean_1[1]; var_mean_1 = None
        add_2: "f32[1, 8, 1, 1]" = torch.ops.aten.add.Tensor(getitem_2, 1e-05); getitem_2 = None
        rsqrt_1: "f32[1, 8, 1, 1]" = torch.ops.aten.rsqrt.default(add_2); add_2 = None
        sub_1: "f32[1, 8, 32, 4624]" = torch.ops.aten.sub.Tensor(view_2, getitem_3); view_2 = getitem_3 = None
        mul_2: "f32[1, 8, 32, 4624]" = torch.ops.aten.mul.Tensor(sub_1, rsqrt_1); sub_1 = rsqrt_1 = None
        view_3: "f32[1, 256, 68, 68]" = torch.ops.aten.view.default(mul_2, [1, 256, 68, 68]); mul_2 = None
        unsqueeze_6: "f32[1, 256]" = torch.ops.aten.unsqueeze.default(arg8_1, 0); arg8_1 = None
        unsqueeze_7: "f32[1, 256, 1]" = torch.ops.aten.unsqueeze.default(unsqueeze_6, 2); unsqueeze_6 = None
        unsqueeze_8: "f32[1, 256, 1, 1]" = torch.ops.aten.unsqueeze.default(unsqueeze_7, 3); unsqueeze_7 = None
        unsqueeze_9: "f32[1, 256]" = torch.ops.aten.unsqueeze.default(arg7_1, 0); arg7_1 = None
        unsqueeze_10: "f32[1, 256, 1]" = torch.ops.aten.unsqueeze.default(unsqueeze_9, 2); unsqueeze_9 = None
        unsqueeze_11: "f32[1, 256, 1, 1]" = torch.ops.aten.unsqueeze.default(unsqueeze_10, 3); unsqueeze_10 = None
        mul_3: "f32[1, 256, 68, 68]" = torch.ops.aten.mul.Tensor(view_3, unsqueeze_11); view_3 = unsqueeze_11 = None
        add_3: "f32[1, 256, 68, 68]" = torch.ops.aten.add.Tensor(mul_3, unsqueeze_8); mul_3 = unsqueeze_8 = None

        # File: /opt/conda/lib/python3.11/site-packages/torch/nn/modules/conv.py:453 in _conv_forward, code: return F.conv2d(input, weight, bias, self.stride,
        convert_element_type_8: "f16[256]" = torch.ops.prims.convert_element_type.default(arg10_1, torch.float16); arg10_1 = None
        convert_element_type_9: "f16[256, 768, 3, 3]" = torch.ops.prims.convert_element_type.default(arg9_1, torch.float16); arg9_1 = None
        convert_element_type_10: "f16[1, 768, 68, 68]" = torch.ops.prims.convert_element_type.default(arg0_1, torch.float16); arg0_1 = None
        convolution_2: "f16[1, 256, 68, 68]" = torch.ops.aten.convolution.default(convert_element_type_10, convert_element_type_9, convert_element_type_8, [1, 1], [1, 1], [1, 1], False, [0, 0], 1); convert_element_type_10 = convert_element_type_9 = convert_element_type_8 = None

        # File: /opt/conda/lib/python3.11/site-packages/torch/nn/modules/normalization.py:287 in forward, code: return F.group_norm(
        convert_element_type_11: "f32[1, 256, 68, 68]" = torch.ops.prims.convert_element_type.default(convolution_2, torch.float32); convolution_2 = None
        view_4: "f32[1, 8, 32, 4624]" = torch.ops.aten.view.default(convert_element_type_11, [1, 8, 32, 4624]); convert_element_type_11 = None
        var_mean_2 = torch.ops.aten.var_mean.correction(view_4, [2, 3], correction = 0, keepdim = True)
        getitem_4: "f32[1, 8, 1, 1]" = var_mean_2[0]
        getitem_5: "f32[1, 8, 1, 1]" = var_mean_2[1]; var_mean_2 = None
        add_4: "f32[1, 8, 1, 1]" = torch.ops.aten.add.Tensor(getitem_4, 1e-05); getitem_4 = None
        rsqrt_2: "f32[1, 8, 1, 1]" = torch.ops.aten.rsqrt.default(add_4); add_4 = None
        sub_2: "f32[1, 8, 32, 4624]" = torch.ops.aten.sub.Tensor(view_4, getitem_5); view_4 = getitem_5 = None
        mul_4: "f32[1, 8, 32, 4624]" = torch.ops.aten.mul.Tensor(sub_2, rsqrt_2); sub_2 = rsqrt_2 = None
        view_5: "f32[1, 256, 68, 68]" = torch.ops.aten.view.default(mul_4, [1, 256, 68, 68]); mul_4 = None
        unsqueeze_12: "f32[1, 256]" = torch.ops.aten.unsqueeze.default(arg12_1, 0); arg12_1 = None
        unsqueeze_13: "f32[1, 256, 1]" = torch.ops.aten.unsqueeze.default(unsqueeze_12, 2); unsqueeze_12 = None
        unsqueeze_14: "f32[1, 256, 1, 1]" = torch.ops.aten.unsqueeze.default(unsqueeze_13, 3); unsqueeze_13 = None
        unsqueeze_15: "f32[1, 256]" = torch.ops.aten.unsqueeze.default(arg11_1, 0); arg11_1 = None
        unsqueeze_16: "f32[1, 256, 1]" = torch.ops.aten.unsqueeze.default(unsqueeze_15, 2); unsqueeze_15 = None
        unsqueeze_17: "f32[1, 256, 1, 1]" = torch.ops.aten.unsqueeze.default(unsqueeze_16, 3); unsqueeze_16 = None
        mul_5: "f32[1, 256, 68, 68]" = torch.ops.aten.mul.Tensor(view_5, unsqueeze_17); view_5 = unsqueeze_17 = None
        add_5: "f32[1, 256, 68, 68]" = torch.ops.aten.add.Tensor(mul_5, unsqueeze_14); mul_5 = unsqueeze_14 = None

        # File: /workspace/networks/layers/basic.py:78 in forward, code: return x + r
        add_6: "f32[1, 256, 68, 68]" = torch.ops.aten.add.Tensor(add_5, add_3); add_5 = add_3 = None

        # File: /workspace/networks/layers/basic.py:69 in forward, code: r = self.conv1(F.relu(x))
        relu_2: "f32[1, 256, 68, 68]" = torch.ops.aten.relu.default(add_6)

        # File: /opt/conda/lib/python3.11/site-packages/torch/nn/modules/conv.py:453 in _conv_forward, code: return F.conv2d(input, weight, bias, self.stride,
        convert_element_type_12: "f16[256]" = torch.ops.prims.convert_element_type.default(arg14_1, torch.float16); arg14_1 = None
        convert_element_type_13: "f16[256, 256, 3, 3]" = torch.ops.prims.convert_element_type.default(arg13_1, torch.float16); arg13_1 = None
        convert_element_type_14: "f16[1, 256, 68, 68]" = torch.ops.prims.convert_element_type.default(relu_2, torch.float16); relu_2 = None
        convolution_3: "f16[1, 256, 68, 68]" = torch.ops.aten.convolution.default(convert_element_type_14, convert_element_type_13, convert_element_type_12, [1, 1], [1, 1], [1, 1], False, [0, 0], 1); convert_element_type_14 = convert_element_type_13 = convert_element_type_12 = None

        # File: /opt/conda/lib/python3.11/site-packages/torch/nn/modules/normalization.py:287 in forward, code: return F.group_norm(
        convert_element_type_15: "f32[1, 256, 68, 68]" = torch.ops.prims.convert_element_type.default(convolution_3, torch.float32); convolution_3 = None
        view_6: "f32[1, 8, 32, 4624]" = torch.ops.aten.view.default(convert_element_type_15, [1, 8, 32, 4624]); convert_element_type_15 = None
        var_mean_3 = torch.ops.aten.var_mean.correction(view_6, [2, 3], correction = 0, keepdim = True)
        getitem_6: "f32[1, 8, 1, 1]" = var_mean_3[0]
        getitem_7: "f32[1, 8, 1, 1]" = var_mean_3[1]; var_mean_3 = None
        add_7: "f32[1, 8, 1, 1]" = torch.ops.aten.add.Tensor(getitem_6, 1e-05); getitem_6 = None
        rsqrt_3: "f32[1, 8, 1, 1]" = torch.ops.aten.rsqrt.default(add_7); add_7 = None
        sub_3: "f32[1, 8, 32, 4624]" = torch.ops.aten.sub.Tensor(view_6, getitem_7); view_6 = getitem_7 = None
        mul_6: "f32[1, 8, 32, 4624]" = torch.ops.aten.mul.Tensor(sub_3, rsqrt_3); sub_3 = rsqrt_3 = None
        view_7: "f32[1, 256, 68, 68]" = torch.ops.aten.view.default(mul_6, [1, 256, 68, 68]); mul_6 = None
        unsqueeze_18: "f32[1, 256]" = torch.ops.aten.unsqueeze.default(arg16_1, 0); arg16_1 = None
        unsqueeze_19: "f32[1, 256, 1]" = torch.ops.aten.unsqueeze.default(unsqueeze_18, 2); unsqueeze_18 = None
        unsqueeze_20: "f32[1, 256, 1, 1]" = torch.ops.aten.unsqueeze.default(unsqueeze_19, 3); unsqueeze_19 = None
        unsqueeze_21: "f32[1, 256]" = torch.ops.aten.unsqueeze.default(arg15_1, 0); arg15_1 = None
        unsqueeze_22: "f32[1, 256, 1]" = torch.ops.aten.unsqueeze.default(unsqueeze_21, 2); unsqueeze_21 = None
        unsqueeze_23: "f32[1, 256, 1, 1]" = torch.ops.aten.unsqueeze.default(unsqueeze_22, 3); unsqueeze_22 = None
        mul_7: "f32[1, 256, 68, 68]" = torch.ops.aten.mul.Tensor(view_7, unsqueeze_23); view_7 = unsqueeze_23 = None
        add_8: "f32[1, 256, 68, 68]" = torch.ops.aten.add.Tensor(mul_7, unsqueeze_20); mul_7 = unsqueeze_20 = None

        # File: /workspace/networks/layers/basic.py:72 in forward, code: r = self.conv2(F.relu(r))
        relu_3: "f32[1, 256, 68, 68]" = torch.ops.aten.relu.default(add_8); add_8 = None

        # File: /opt/conda/lib/python3.11/site-packages/torch/nn/modules/conv.py:453 in _conv_forward, code: return F.conv2d(input, weight, bias, self.stride,
        convert_element_type_16: "f16[256]" = torch.ops.prims.convert_element_type.default(arg18_1, torch.float16); arg18_1 = None
        convert_element_type_17: "f16[256, 256, 3, 3]" = torch.ops.prims.convert_element_type.default(arg17_1, torch.float16); arg17_1 = None
        convert_element_type_18: "f16[1, 256, 68, 68]" = torch.ops.prims.convert_element_type.default(relu_3, torch.float16); relu_3 = None
        convolution_4: "f16[1, 256, 68, 68]" = torch.ops.aten.convolution.default(convert_element_type_18, convert_element_type_17, convert_element_type_16, [1, 1], [1, 1], [1, 1], False, [0, 0], 1); convert_element_type_18 = convert_element_type_17 = convert_element_type_16 = None

        # File: /opt/conda/lib/python3.11/site-packages/torch/nn/modules/normalization.py:287 in forward, code: return F.group_norm(
        convert_element_type_19: "f32[1, 256, 68, 68]" = torch.ops.prims.convert_element_type.default(convolution_4, torch.float32); convolution_4 = None
        view_8: "f32[1, 8, 32, 4624]" = torch.ops.aten.view.default(convert_element_type_19, [1, 8, 32, 4624]); convert_element_type_19 = None
        var_mean_4 = torch.ops.aten.var_mean.correction(view_8, [2, 3], correction = 0, keepdim = True)
        getitem_8: "f32[1, 8, 1, 1]" = var_mean_4[0]
        getitem_9: "f32[1, 8, 1, 1]" = var_mean_4[1]; var_mean_4 = None
        add_9: "f32[1, 8, 1, 1]" = torch.ops.aten.add.Tensor(getitem_8, 1e-05); getitem_8 = None
        rsqrt_4: "f32[1, 8, 1, 1]" = torch.ops.aten.rsqrt.default(add_9); add_9 = None
        sub_4: "f32[1, 8, 32, 4624]" = torch.ops.aten.sub.Tensor(view_8, getitem_9); view_8 = getitem_9 = None
        mul_8: "f32[1, 8, 32, 4624]" = torch.ops.aten.mul.Tensor(sub_4, rsqrt_4); sub_4 = rsqrt_4 = None
        view_9: "f32[1, 256, 68, 68]" = torch.ops.aten.view.default(mul_8, [1, 256, 68, 68]); mul_8 = None
        unsqueeze_24: "f32[1, 256]" = torch.ops.aten.unsqueeze.default(arg20_1, 0); arg20_1 = None
        unsqueeze_25: "f32[1, 256, 1]" = torch.ops.aten.unsqueeze.default(unsqueeze_24, 2); unsqueeze_24 = None
        unsqueeze_26: "f32[1, 256, 1, 1]" = torch.ops.aten.unsqueeze.default(unsqueeze_25, 3); unsqueeze_25 = None
        unsqueeze_27: "f32[1, 256]" = torch.ops.aten.unsqueeze.default(arg19_1, 0); arg19_1 = None
        unsqueeze_28: "f32[1, 256, 1]" = torch.ops.aten.unsqueeze.default(unsqueeze_27, 2); unsqueeze_27 = None
        unsqueeze_29: "f32[1, 256, 1, 1]" = torch.ops.aten.unsqueeze.default(unsqueeze_28, 3); unsqueeze_28 = None
        mul_9: "f32[1, 256, 68, 68]" = torch.ops.aten.mul.Tensor(view_9, unsqueeze_29); view_9 = unsqueeze_29 = None
        add_10: "f32[1, 256, 68, 68]" = torch.ops.aten.add.Tensor(mul_9, unsqueeze_26); mul_9 = unsqueeze_26 = None

        # File: /workspace/networks/layers/basic.py:78 in forward, code: return x + r
        add_11: "f32[1, 256, 68, 68]" = torch.ops.aten.add.Tensor(add_6, add_10); add_6 = add_10 = None
        return (add_11,)
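The `# File:` annotations and tensor shapes pin down what this graph was traced from: two pre-activation residual blocks (ReLU, then 3x3 Conv2d, then GroupNorm with 8 groups over 256 channels), with a 768-to-256 projection on the skip path of the first block, run under float16 autocast, so the convolutions execute in fp16 while the GroupNorm statistics are computed in fp32. A minimal sketch of eager-mode code that lowers to a graph of this shape, with assumed names (`ResBlock`, `conv_gn`, and `downsample` are hypothetical; only the three source lines quoted from `basic.py` come from the dump):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def conv_gn(in_dim, out_dim):
    # 3x3 conv (stride 1, padding 1, with bias) followed by GroupNorm(8, C),
    # matching the convolution + var_mean pattern in the graph above.
    return nn.Sequential(nn.Conv2d(in_dim, out_dim, 3, padding=1),
                         nn.GroupNorm(8, out_dim))

class ResBlock(nn.Module):  # hypothetical name
    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.conv1 = conv_gn(in_dim, out_dim)
        self.conv2 = conv_gn(out_dim, out_dim)
        # 768 -> 256 projection on the skip path (convolution_2 in the graph)
        self.downsample = conv_gn(in_dim, out_dim) if in_dim != out_dim else None

    def forward(self, x):
        r = self.conv1(F.relu(x))   # basic.py:69
        r = self.conv2(F.relu(r))   # basic.py:72
        if self.downsample is not None:
            x = self.downsample(x)
        return x + r                # basic.py:78

# Inference under fp16 autocast reproduces the f16 convolutions and the
# fp32 casts around group_norm seen above (assumes a CUDA device).
blocks = nn.Sequential(ResBlock(768, 256), ResBlock(256, 256)).cuda().eval()
with torch.no_grad(), torch.autocast("cuda", dtype=torch.float16):
    out = torch.compile(blocks)(torch.randn(1, 768, 68, 68, device="cuda"))
```

The five `view -> var_mean -> rsqrt -> mul/add` sequences are the standard decomposition of `F.group_norm`: the 256 channels are viewed as 8 groups of 32, mean and biased variance (`correction = 0`) are taken per group over the last two dims, the input is normalized with `rsqrt(var + 1e-05)`, and the result is rescaled by the per-channel affine weight and bias (the `unsqueeze` chains). A quick check that the decomposition matches the functional op:

```python
x = torch.randn(1, 256, 68, 68)
w, b = torch.randn(256), torch.randn(256)

v = x.view(1, 8, 32, 68 * 68)                        # split channels into 8 groups
var, mean = torch.var_mean(v, [2, 3], correction=0, keepdim=True)
y = ((v - mean) * torch.rsqrt(var + 1e-5)).view(1, 256, 68, 68)
y = y * w.view(1, 256, 1, 1) + b.view(1, 256, 1, 1)  # per-channel affine

torch.testing.assert_close(y, F.group_norm(x, 8, w, b, eps=1e-5),
                           rtol=1e-4, atol=1e-5)
```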