class <lambda>(torch.nn.Module):
    def forward(self, arg0_1: "Sym(s0)", arg1_1: "Sym(s1)", arg2_1: "f16[1, 256, s0, s1]", arg3_1: "f32[225, 256, 1, 1]", arg4_1: "f32[225]", arg5_1: "f16[1, 256, s0, s1]", arg6_1: "f16[1, 256, s0, s1]"):
        # File: /workspace/networks/layers/attention.py:327 in forward, code: memory_mask = torch.ones((1, 1, h, w), device=v.device).float()
        full: "f32[1, 1, s0, s1]" = torch.ops.aten.full.default([1, 1, arg0_1, arg1_1], 1, dtype = torch.float32, layout = torch.strided, device = device(type='cuda', index=0), pin_memory = False)

        # File: /opt/conda/lib/python3.11/site-packages/torch/nn/functional.py:4552 in pad, code: return torch._C._nn.pad(input, pad, mode, value)
        constant_pad_nd: "f32[1, 1, s0 + 14, s1 + 14]" = torch.ops.aten.constant_pad_nd.default(full, [7, 7, 7, 7], 0.0); full = None

        # File: /workspace/networks/layers/attention.py:434 in pad_and_unfold, code: x = F.unfold(x,
        iota: "i64[s0]" = torch.ops.prims.iota.default(arg0_1, start = 0, step = 1, dtype = torch.int64, device = device(type='cuda', index=0), requires_grad = False)
        unsqueeze: "i64[1, s0]" = torch.ops.aten.unsqueeze.default(iota, 0); iota = None
        iota_1: "i64[15]" = torch.ops.prims.iota.default(15, start = 0, step = 1, dtype = torch.int64, device = device(type='cuda', index=0), requires_grad = False)
        unsqueeze_1: "i64[15, 1]" = torch.ops.aten.unsqueeze.default(iota_1, -1); iota_1 = None
        add_2: "i64[15, s0]" = torch.ops.aten.add.Tensor(unsqueeze, unsqueeze_1); unsqueeze = unsqueeze_1 = None
        iota_2: "i64[s1]" = torch.ops.prims.iota.default(arg1_1, start = 0, step = 1, dtype = torch.int64, device = device(type='cuda', index=0), requires_grad = False)
        unsqueeze_2: "i64[1, s1]" = torch.ops.aten.unsqueeze.default(iota_2, 0); iota_2 = None
        iota_3: "i64[15]" = torch.ops.prims.iota.default(15, start = 0, step = 1, dtype = torch.int64, device = device(type='cuda', index=0), requires_grad = False)
        unsqueeze_3: "i64[15, 1]" = torch.ops.aten.unsqueeze.default(iota_3, -1); iota_3 = None
        add_5: "i64[15, s1]" = torch.ops.aten.add.Tensor(unsqueeze_2, unsqueeze_3); unsqueeze_2 = unsqueeze_3 = None
        constant_pad_nd_1: "f32[1, 1, s0 + 14, s1 + 14]" = torch.ops.aten.constant_pad_nd.default(constant_pad_nd, [0, 0, 0, 0], 0.0); constant_pad_nd = None
        unsqueeze_4: "i64[15, s0, 1]" = torch.ops.aten.unsqueeze.default(add_2, -1); add_2 = None
        unsqueeze_5: "i64[15, s0, 1, 1]" = torch.ops.aten.unsqueeze.default(unsqueeze_4, -1); unsqueeze_4 = None
        slice_1: "f32[1, 1, s0 + 14, s1 + 14]" = torch.ops.aten.slice.Tensor(constant_pad_nd_1, 0, 0, 9223372036854775807); constant_pad_nd_1 = None
        slice_2: "f32[1, 1, s0 + 14, s1 + 14]" = torch.ops.aten.slice.Tensor(slice_1, 1, 0, 9223372036854775807); slice_1 = None
        index: "f32[1, 1, 15, s0, 15, s1]" = torch.ops.aten.index.Tensor(slice_2, [None, None, unsqueeze_5, add_5]); slice_2 = unsqueeze_5 = add_5 = None
        permute: "f32[1, 1, 15, 15, s0, s1]" = torch.ops.aten.permute.default(index, [0, 1, 2, 4, 3, 5]); index = None
        clone: "f32[1, 1, 15, 15, s0, s1]" = torch.ops.aten.clone.default(permute, memory_format = torch.contiguous_format); permute = None
        mul: "Sym(s0*s1)" = arg0_1 * arg1_1
        view: "f32[1, 225, s0*s1]" = torch.ops.aten.view.default(clone, [1, 225, mul]); clone = None

        # File: /workspace/networks/layers/attention.py:328 in forward, code: unfolded_k_mask = self.pad_and_unfold(memory_mask).view(
        view_1: "f32[1, 1, 225, s0*s1]" = torch.ops.aten.view.default(view, [1, 1, 225, mul]); view = None

        # File: /workspace/networks/layers/attention.py:330 in forward, code: qk_mask = 1 - unfolded_k_mask
        sub_6: "f32[1, 1, 225, s0*s1]" = torch.ops.aten.sub.Tensor(1, view_1); view_1 = None

        # File: /opt/conda/lib/python3.11/site-packages/torch/nn/modules/conv.py:453 in _conv_forward, code: return F.conv2d(input, weight, bias, self.stride,
        convert_element_type: "f16[225]" = torch.ops.prims.convert_element_type.default(arg4_1, torch.float16); arg4_1 = None
        convert_element_type_1: "f16[225, 256, 1, 1]" = torch.ops.prims.convert_element_type.default(arg3_1, torch.float16); arg3_1 = None
        convolution: "f16[1, 225, s0, s1]" = torch.ops.aten.convolution.default(arg5_1, convert_element_type_1, convert_element_type, [1, 1], [0, 0], [1, 1], False, [0, 0], 1); convert_element_type_1 = convert_element_type = None

        # File: /workspace/networks/layers/attention.py:335 in forward, code: q = q / self.T
        div: "f16[1, 256, s0, s1]" = torch.ops.aten.div.Tensor(arg5_1, 16.0); arg5_1 = None

        # File: /workspace/networks/layers/attention.py:337 in forward, code: q = q.view(-1, self.d_att, h, w)
        view_2: "f16[1, 256, s0, s1]" = torch.ops.aten.view.default(div, [-1, 256, arg0_1, arg1_1]); div = None

        # File: /workspace/networks/layers/attention.py:338 in forward, code: k = k.view(-1, self.d_att, h, w)
        view_3: "f16[1, 256, s0, s1]" = torch.ops.aten.view.default(arg6_1, [-1, 256, arg0_1, arg1_1]); arg6_1 = None

        # File: /workspace/networks/layers/attention.py:339 in forward, code: v = v.view(-1, self.num_head, hidden_dim, h * w)
        view_4: "f16[1, 1, 256, s0*s1]" = torch.ops.aten.view.default(arg2_1, [-1, 1, 256, mul]); arg2_1 = None

        # File: /workspace/networks/layers/attention.py:341 in forward, code: relative_emb = relative_emb.view(n, self.num_head,
        view_5: "f16[1, 1, 225, s0*s1]" = torch.ops.aten.view.default(convolution, [1, 1, 225, mul]); convolution = mul = None
        return (view_2, view_3, view_4, arg0_1, arg1_1, sub_6, view_5)
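
For reference, below is a minimal eager-mode sketch that reproduces the traced pattern above. The shapes and constants (d_att = 256, num_head = 1, hidden_dim = 256, a 15x15 unfold window giving 225 relative positions, T = 16.0, padding 7) are read off the graph's source annotations; the class name LocalAttentionStub, the attribute relative_emb_k, and the window parameter are illustrative assumptions, not the original attention.py implementation.

import torch
import torch.nn as nn
import torch.nn.functional as F


class LocalAttentionStub(nn.Module):
    """Hypothetical stub matching the traced shapes, not the original module."""

    def __init__(self, d_att=256, num_head=1, hidden_dim=256, window=15, T=16.0):
        super().__init__()
        self.d_att = d_att
        self.num_head = num_head
        self.hidden_dim = hidden_dim
        self.window = window
        self.T = T
        # 1x1 conv producing one channel per relative position (15*15 = 225),
        # matching the f16[1, 225, s0, s1] convolution node.
        self.relative_emb_k = nn.Conv2d(d_att, window * window, kernel_size=1)

    def pad_and_unfold(self, x):
        # F.unfold with padding (window - 1) // 2 keeps the spatial size and
        # yields (N, C * window**2, H * W); the trace decomposes this into
        # constant_pad_nd plus the iota/index/permute/view chain above.
        pad = (self.window - 1) // 2
        return F.unfold(x, kernel_size=self.window, padding=pad)

    def forward(self, q, k, v):
        n, _, h, w = v.shape
        # memory_mask -> full / constant_pad_nd / ... / sub_6 in the graph
        memory_mask = torch.ones((1, 1, h, w), device=v.device).float()
        unfolded_k_mask = self.pad_and_unfold(memory_mask).view(
            1, 1, self.window * self.window, h * w)
        qk_mask = 1 - unfolded_k_mask

        relative_emb = self.relative_emb_k(q)   # convolution node
        q = q / self.T                          # div node
        q = q.view(-1, self.d_att, h, w)        # view_2
        k = k.view(-1, self.d_att, h, w)        # view_3
        v = v.view(-1, self.num_head, self.hidden_dim, h * w)  # view_4
        relative_emb = relative_emb.view(
            n, self.num_head, self.window * self.window, h * w)  # view_5
        return q, k, v, h, w, qk_mask, relative_emb

The returned tuple lines up with the graph outputs (view_2, view_3, view_4, arg0_1, arg1_1, sub_6, view_5), so running this stub under torch.compile with dynamic height and width should reproduce the same symbolic shapes s0 and s1.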