import torch
from torch import device


class GraphModule(torch.nn.Module):
    """Compiled FX graph (AOT-generated ATen/prims ops) for a local-attention forward pass.

    Reconstructed from a torch.compile debug dump of
    ``networks/layers/attention.py`` (see the ``# File:`` provenance comments
    below). The dump was missing the class name and had all statements
    collapsed onto a few lines; this restores a valid, readable module with
    the op sequence byte-for-byte intact.

    NOTE(review): every tensor is created on ``cuda:0`` (hard-coded by the
    compiler), so this graph only runs on a CUDA machine.
    """

    def forward(
        self,
        arg0_1: "f16[1, 256, 68, 68]",   # v  — value feature map (viewed to [-1, 1, 256, 4624] below)
        arg1_1: "f32[225, 256, 1, 1]",   # 1x1 conv weight for the relative-embedding projection
        arg2_1: "f32[225]",              # conv bias for the relative-embedding projection
        arg3_1: "f16[1, 256, 68, 68]",   # q  — query feature map (also conv input for relative_emb)
        arg4_1: "f16[1, 256, 68, 68]",   # k  — key feature map
    ):
        # File: /workspace/networks/layers/attention.py:327 in forward, code:
        #   memory_mask = torch.ones((1, 1, h, w), device=v.device).float()
        full: "f32[1, 1, 68, 68]" = torch.ops.aten.full.default(
            [1, 1, 68, 68], 1,
            dtype=torch.float32, layout=torch.strided,
            device=device(type='cuda', index=0), pin_memory=False,
        )

        # File: /opt/conda/lib/python3.11/site-packages/torch/nn/functional.py:4552 in pad, code:
        #   return torch._C._nn.pad(input, pad, mode, value)
        # Pad the mask by 7 on each spatial side (15x15 local window => radius 7).
        constant_pad_nd: "f32[1, 1, 82, 82]" = torch.ops.aten.constant_pad_nd.default(
            full, [7, 7, 7, 7], 0.0
        ); full = None

        # File: /workspace/networks/layers/attention.py:434 in pad_and_unfold, code:
        #   x = F.unfold(x, ...)
        # F.unfold decomposed by the compiler into iota/index/permute/view.
        # Row gather indices: window offset (0..14) + output row (0..67).
        iota: "i64[68]" = torch.ops.prims.iota.default(
            68, start=0, step=1, dtype=torch.int64,
            device=device(type='cuda', index=0), requires_grad=False,
        )
        unsqueeze: "i64[1, 68]" = torch.ops.aten.unsqueeze.default(iota, 0); iota = None
        iota_1: "i64[15]" = torch.ops.prims.iota.default(
            15, start=0, step=1, dtype=torch.int64,
            device=device(type='cuda', index=0), requires_grad=False,
        )
        unsqueeze_1: "i64[15, 1]" = torch.ops.aten.unsqueeze.default(iota_1, -1); iota_1 = None
        add: "i64[15, 68]" = torch.ops.aten.add.Tensor(unsqueeze, unsqueeze_1)
        unsqueeze = unsqueeze_1 = None

        # Column gather indices, built the same way.
        iota_2: "i64[68]" = torch.ops.prims.iota.default(
            68, start=0, step=1, dtype=torch.int64,
            device=device(type='cuda', index=0), requires_grad=False,
        )
        unsqueeze_2: "i64[1, 68]" = torch.ops.aten.unsqueeze.default(iota_2, 0); iota_2 = None
        iota_3: "i64[15]" = torch.ops.prims.iota.default(
            15, start=0, step=1, dtype=torch.int64,
            device=device(type='cuda', index=0), requires_grad=False,
        )
        unsqueeze_3: "i64[15, 1]" = torch.ops.aten.unsqueeze.default(iota_3, -1); iota_3 = None
        add_1: "i64[15, 68]" = torch.ops.aten.add.Tensor(unsqueeze_2, unsqueeze_3)
        unsqueeze_2 = unsqueeze_3 = None

        # Zero-width pad emitted by the unfold decomposition (no-op on values).
        constant_pad_nd_1: "f32[1, 1, 82, 82]" = torch.ops.aten.constant_pad_nd.default(
            constant_pad_nd, [0, 0, 0, 0], 0.0
        ); constant_pad_nd = None

        unsqueeze_4: "i64[15, 68, 1]" = torch.ops.aten.unsqueeze.default(add, -1); add = None
        unsqueeze_5: "i64[15, 68, 1, 1]" = torch.ops.aten.unsqueeze.default(unsqueeze_4, -1)
        unsqueeze_4 = None

        # Full-range slices (end = 2**63 - 1) are identity views on dims 0 and 1.
        slice_1: "f32[1, 1, 82, 82]" = torch.ops.aten.slice.Tensor(
            constant_pad_nd_1, 0, 0, 9223372036854775807
        ); constant_pad_nd_1 = None
        slice_2: "f32[1, 1, 82, 82]" = torch.ops.aten.slice.Tensor(
            slice_1, 1, 0, 9223372036854775807
        ); slice_1 = None

        # Advanced indexing gathers every 15x15 window at every output location.
        index: "f32[1, 1, 15, 68, 15, 68]" = torch.ops.aten.index.Tensor(
            slice_2, [None, None, unsqueeze_5, add_1]
        ); slice_2 = unsqueeze_5 = add_1 = None
        permute: "f32[1, 1, 15, 15, 68, 68]" = torch.ops.aten.permute.default(
            index, [0, 1, 2, 4, 3, 5]
        ); index = None
        clone: "f32[1, 1, 15, 15, 68, 68]" = torch.ops.aten.clone.default(
            permute, memory_format=torch.contiguous_format
        ); permute = None
        view: "f32[1, 225, 4624]" = torch.ops.aten.view.default(clone, [1, 225, 4624])
        clone = None

        # File: /workspace/networks/layers/attention.py:328 in forward, code:
        #   unfolded_k_mask = self.pad_and_unfold(memory_mask).view(...)
        view_1: "f32[1, 1, 225, 4624]" = torch.ops.aten.view.default(view, [1, 1, 225, 4624])
        view = None

        # File: /workspace/networks/layers/attention.py:330 in forward, code:
        #   qk_mask = 1 - unfolded_k_mask
        sub: "f32[1, 1, 225, 4624]" = torch.ops.aten.sub.Tensor(1, view_1); view_1 = None

        # File: /opt/conda/lib/python3.11/site-packages/torch/nn/modules/conv.py:453 in _conv_forward, code:
        #   return F.conv2d(input, weight, bias, self.stride, ...)
        # Weight/bias are cast to fp16 to match the fp16 activations (autocast-style).
        convert_element_type: "f16[225]" = torch.ops.prims.convert_element_type.default(
            arg2_1, torch.float16
        ); arg2_1 = None
        convert_element_type_1: "f16[225, 256, 1, 1]" = torch.ops.prims.convert_element_type.default(
            arg1_1, torch.float16
        ); arg1_1 = None
        convolution: "f16[1, 225, 68, 68]" = torch.ops.aten.convolution.default(
            arg3_1, convert_element_type_1, convert_element_type,
            [1, 1], [0, 0], [1, 1], False, [0, 0], 1,
        ); convert_element_type_1 = convert_element_type = None

        # File: /workspace/networks/layers/attention.py:335 in forward, code:
        #   q = q / self.T
        # T = 16.0 here (presumably sqrt(d_att) = sqrt(256) — TODO confirm in attention.py).
        div: "f16[1, 256, 68, 68]" = torch.ops.aten.div.Tensor(arg3_1, 16.0); arg3_1 = None

        # File: /workspace/networks/layers/attention.py:337 in forward, code:
        #   q = q.view(-1, self.d_att, h, w)
        view_2: "f16[1, 256, 68, 68]" = torch.ops.aten.view.default(div, [-1, 256, 68, 68])
        div = None

        # File: /workspace/networks/layers/attention.py:338 in forward, code:
        #   k = k.view(-1, self.d_att, h, w)
        view_3: "f16[1, 256, 68, 68]" = torch.ops.aten.view.default(arg4_1, [-1, 256, 68, 68])
        arg4_1 = None

        # File: /workspace/networks/layers/attention.py:339 in forward, code:
        #   v = v.view(-1, self.num_head, hidden_dim, h * w)
        view_4: "f16[1, 1, 256, 4624]" = torch.ops.aten.view.default(arg0_1, [-1, 1, 256, 4624])
        arg0_1 = None

        # File: /workspace/networks/layers/attention.py:341 in forward, code:
        #   relative_emb = relative_emb.view(n, self.num_head, ...)
        view_5: "f16[1, 1, 225, 4624]" = torch.ops.aten.view.default(convolution, [1, 1, 225, 4624])
        convolution = None

        # (q, k, v, qk_mask, relative_emb)
        return (view_2, view_3, view_4, sub, view_5)