============================= test session starts ==============================
platform linux -- Python 3.11.4, pytest-7.4.2, pluggy-1.3.0
rootdir: /mnt/ext/torchdrive
plugins: anyio-4.0.0, typeguard-2.13.3, xdist-3.3.1
collected 2 items / 1 deselected / 1 selected

torchworld/transforms/test_sfm.py F                                      [100%]

=================================== FAILURES ===================================
_____________________________ TestSFM.test_export ______________________________

self = <test_sfm.TestSFM testMethod=test_export>

    def test_export(self) -> None:
        data = torch.ones(2, 3, 4, 6)
        model = MyModel()
        model(data)
        exported = export(model, args=(data,))
        self.assertIsNotNone(exported)
        print(exported)
>       self.fail()
E       AssertionError: None

torchworld/transforms/test_sfm.py:50: AssertionError
----------------------------- Captured stdout call -----------------------------
ExportedProgram:
    class GraphModule(torch.nn.Module):
        def forward(self, arg0_1: f32[2, 1, 4, 6], arg1_1: f32[1, 2], arg2_1: f32[1, 2], arg3_1: f32[1, 3, 3], arg4_1: f32[1, 3], arg5_1: f32[2, 3, 4, 6]):
            #
            rand: f32[2] = torch.ops.aten.rand.default([2], device = device(type='cpu'), pin_memory = False)
            full: f32[2, 1, 4, 6] = torch.ops.aten.full.default([2, 1, 4, 6], 1, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
            full_1: f32[2, 3, 4, 6] = torch.ops.aten.full.default([2, 3, 4, 6], 0, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
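            # The two aranges below sweep [-1, 1) in steps of 2/H = 0.5 and
            # 2/W = 1/3; the view/expand/cat sequence then assembles a
            # (4, 6, 2) grid of normalized pixel coordinates (a decomposed meshgrid).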
            arange: f32[4] = torch.ops.aten.arange.start_step(-1, 0.99999999, 0.5, device = device(type='cpu'), pin_memory = False)
            arange_1: f32[6] = torch.ops.aten.arange.start_step(-1, 0.99999999, 0.3333333333333333, device = device(type='cpu'), pin_memory = False)
            view: f32[4, 1] = torch.ops.aten.view.default(arange, [-1, 1]); arange = None
            expand: f32[4, 6] = torch.ops.aten.expand.default(view, [4, 6]); view = None
            view_1: f32[1, 6] = torch.ops.aten.view.default(arange_1, [1, -1]); arange_1 = None
            expand_1: f32[4, 6] = torch.ops.aten.expand.default(view_1, [4, 6]); view_1 = None
            unsqueeze: f32[4, 6, 1] = torch.ops.aten.unsqueeze.default(expand, 2); expand = None
            unsqueeze_1: f32[4, 6, 1] = torch.ops.aten.unsqueeze.default(expand_1, 2); expand_1 = None
            cat: f32[4, 6, 2] = torch.ops.aten.cat.default([unsqueeze, unsqueeze_1], -1); unsqueeze = unsqueeze_1 = None
            expand_2: f32[2, 4, 6, 2] = torch.ops.aten.expand.default(cat, [2, -1, -1, -1]); cat = None
            permute: f32[2, 4, 6, 1] = torch.ops.aten.permute.default(full, [0, 2, 3, 1]); full = None
            cat_1: f32[2, 4, 6, 3] = torch.ops.aten.cat.default([expand_2, permute], -1); expand_2 = permute = None
            view_2: f32[2, 24, 3] = torch.ops.aten.view.default(cat_1, [2, 24, 3]); cat_1 = None
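            # arange + unsqueeze + eq + where below builds a 4x4 identity matrix
            # elementwise (the exported decomposition of torch.eye(4)); the same
            # pattern recurs several times in this graph.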
            arange_2: i64[4] = torch.ops.aten.arange.start_step(0, 4, dtype = torch.int64, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
            arange_3: i64[4] = torch.ops.aten.arange.start_step(0, 4, dtype = torch.int64, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
            unsqueeze_2: i64[4, 1] = torch.ops.aten.unsqueeze.default(arange_2, -1); arange_2 = None
            eq: b8[4, 4] = torch.ops.aten.eq.Tensor(unsqueeze_2, arange_3); unsqueeze_2 = arange_3 = None
            full_2: f32[1] = torch.ops.aten.full.default([1], 1, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
            scalar_tensor: f32[] = torch.ops.aten.scalar_tensor.default(0, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'))
            where: f32[4, 4] = torch.ops.aten.where.self(eq, full_2, scalar_tensor); eq = full_2 = scalar_tensor = None
            view_3: f32[1, 4, 4] = torch.ops.aten.view.default(where, [1, 4, 4]); where = None
            repeat: f32[1, 4, 4] = torch.ops.aten.repeat.default(view_3, [1, 1, 1]); view_3 = None
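            # The slice/copy/slice_scatter chain below writes camera_R (arg3_1)
            # into the top-left 3x3 block of the 4x4 identity, i.e. the
            # functionalized form of an in-place slice assignment.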
            slice_1: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(repeat, 0, 0, 9223372036854775807)
            slice_2: f32[1, 3, 4] = torch.ops.aten.slice.Tensor(slice_1, 1, 0, 3); slice_1 = None
            slice_3: f32[1, 3, 3] = torch.ops.aten.slice.Tensor(slice_2, 2, 0, 3); slice_2 = None
            copy: f32[1, 3, 3] = torch.ops.aten.copy.default(slice_3, arg3_1); slice_3 = None
            slice_4: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(repeat, 0, 0, 9223372036854775807)
            slice_5: f32[1, 3, 4] = torch.ops.aten.slice.Tensor(slice_4, 1, 0, 3)
            slice_scatter: f32[1, 3, 4] = torch.ops.aten.slice_scatter.default(slice_5, copy, 2, 0, 3); slice_5 = copy = None
            slice_scatter_1: f32[1, 4, 4] = torch.ops.aten.slice_scatter.default(slice_4, slice_scatter, 1, 0, 3); slice_4 = slice_scatter = None
            slice_scatter_2: f32[1, 4, 4] = torch.ops.aten.slice_scatter.default(repeat, slice_scatter_1, 0, 0, 9223372036854775807); repeat = slice_scatter_1 = None
            arange_4: i64[4] = torch.ops.aten.arange.start_step(0, 4, dtype = torch.int64, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
            arange_5: i64[4] = torch.ops.aten.arange.start_step(0, 4, dtype = torch.int64, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
            unsqueeze_3: i64[4, 1] = torch.ops.aten.unsqueeze.default(arange_4, -1); arange_4 = None
            eq_1: b8[4, 4] = torch.ops.aten.eq.Tensor(unsqueeze_3, arange_5); unsqueeze_3 = arange_5 = None
            full_3: f32[1] = torch.ops.aten.full.default([1], 1, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
            scalar_tensor_1: f32[] = torch.ops.aten.scalar_tensor.default(0, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'))
            where_1: f32[4, 4] = torch.ops.aten.where.self(eq_1, full_3, scalar_tensor_1); eq_1 = full_3 = scalar_tensor_1 = None
            view_4: f32[1, 4, 4] = torch.ops.aten.view.default(where_1, [1, 4, 4]); where_1 = None
            repeat_1: f32[1, 4, 4] = torch.ops.aten.repeat.default(view_4, [1, 1, 1]); view_4 = None
            slice_6: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(repeat_1, 0, 0, 9223372036854775807)
            select: f32[1, 4] = torch.ops.aten.select.int(slice_6, 1, 3); slice_6 = None
            slice_7: f32[1, 3] = torch.ops.aten.slice.Tensor(select, 1, 0, 3); select = None
            copy_1: f32[1, 3] = torch.ops.aten.copy.default(slice_7, arg4_1); slice_7 = None
            slice_8: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(repeat_1, 0, 0, 9223372036854775807)
            select_1: f32[1, 4] = torch.ops.aten.select.int(slice_8, 1, 3)
            slice_scatter_3: f32[1, 4] = torch.ops.aten.slice_scatter.default(select_1, copy_1, 1, 0, 3); select_1 = copy_1 = None
            select_scatter: f32[1, 4, 4] = torch.ops.aten.select_scatter.default(slice_8, slice_scatter_3, 1, 3); slice_8 = slice_scatter_3 = None
            slice_scatter_4: f32[1, 4, 4] = torch.ops.aten.slice_scatter.default(repeat_1, select_scatter, 0, 0, 9223372036854775807); repeat_1 = select_scatter = None
            clone: f32[1, 4, 4] = torch.ops.aten.clone.default(slice_scatter_2); slice_scatter_2 = None
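            # camera_focal_length (arg1_1) and camera_principal_point (arg2_1)
            # are unbound into their x/y components and scattered into a 4x4
            # projection matrix one element at a time below.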
            unbind = torch.ops.aten.unbind.int(arg1_1, 1)
            getitem: f32[1] = unbind[0]
            getitem_1: f32[1] = unbind[1]; unbind = None
            unbind_1 = torch.ops.aten.unbind.int(arg2_1, 1)
            getitem_2: f32[1] = unbind_1[0]
            getitem_3: f32[1] = unbind_1[1]; unbind_1 = None
            full_4: f32[1, 4, 4] = torch.ops.aten.full.default([1, 4, 4], 0, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
            slice_9: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(full_4, 0, 0, 9223372036854775807)
            select_2: f32[1, 4] = torch.ops.aten.select.int(slice_9, 1, 0); slice_9 = None
            select_3: f32[1] = torch.ops.aten.select.int(select_2, 1, 0); select_2 = None
            copy_2: f32[1] = torch.ops.aten.copy.default(select_3, getitem); select_3 = getitem = None
            slice_10: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(full_4, 0, 0, 9223372036854775807)
            select_4: f32[1, 4] = torch.ops.aten.select.int(slice_10, 1, 0)
            select_scatter_1: f32[1, 4] = torch.ops.aten.select_scatter.default(select_4, copy_2, 1, 0); select_4 = copy_2 = None
            select_scatter_2: f32[1, 4, 4] = torch.ops.aten.select_scatter.default(slice_10, select_scatter_1, 1, 0); slice_10 = select_scatter_1 = None
            slice_scatter_5: f32[1, 4, 4] = torch.ops.aten.slice_scatter.default(full_4, select_scatter_2, 0, 0, 9223372036854775807); full_4 = select_scatter_2 = None
            slice_11: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(slice_scatter_5, 0, 0, 9223372036854775807)
            select_5: f32[1, 4] = torch.ops.aten.select.int(slice_11, 1, 1); slice_11 = None
            select_6: f32[1] = torch.ops.aten.select.int(select_5, 1, 1); select_5 = None
            copy_3: f32[1] = torch.ops.aten.copy.default(select_6, getitem_1); select_6 = getitem_1 = None
            slice_12: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(slice_scatter_5, 0, 0, 9223372036854775807)
            select_7: f32[1, 4] = torch.ops.aten.select.int(slice_12, 1, 1)
            select_scatter_3: f32[1, 4] = torch.ops.aten.select_scatter.default(select_7, copy_3, 1, 1); select_7 = copy_3 = None
            select_scatter_4: f32[1, 4, 4] = torch.ops.aten.select_scatter.default(slice_12, select_scatter_3, 1, 1); slice_12 = select_scatter_3 = None
            slice_scatter_6: f32[1, 4, 4] = torch.ops.aten.slice_scatter.default(slice_scatter_5, select_scatter_4, 0, 0, 9223372036854775807); slice_scatter_5 = select_scatter_4 = None
            slice_13: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(slice_scatter_6, 0, 0, 9223372036854775807)
            select_8: f32[1, 4] = torch.ops.aten.select.int(slice_13, 1, 0); slice_13 = None
            select_9: f32[1] = torch.ops.aten.select.int(select_8, 1, 2); select_8 = None
            copy_4: f32[1] = torch.ops.aten.copy.default(select_9, getitem_2); select_9 = getitem_2 = None
            slice_14: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(slice_scatter_6, 0, 0, 9223372036854775807)
            select_10: f32[1, 4] = torch.ops.aten.select.int(slice_14, 1, 0)
            select_scatter_5: f32[1, 4] = torch.ops.aten.select_scatter.default(select_10, copy_4, 1, 2); select_10 = copy_4 = None
            select_scatter_6: f32[1, 4, 4] = torch.ops.aten.select_scatter.default(slice_14, select_scatter_5, 1, 0); slice_14 = select_scatter_5 = None
            slice_scatter_7: f32[1, 4, 4] = torch.ops.aten.slice_scatter.default(slice_scatter_6, select_scatter_6, 0, 0, 9223372036854775807); slice_scatter_6 = select_scatter_6 = None
            slice_15: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(slice_scatter_7, 0, 0, 9223372036854775807)
            select_11: f32[1, 4] = torch.ops.aten.select.int(slice_15, 1, 1); slice_15 = None
            select_12: f32[1] = torch.ops.aten.select.int(select_11, 1, 2); select_11 = None
            copy_5: f32[1] = torch.ops.aten.copy.default(select_12, getitem_3); select_12 = getitem_3 = None
            slice_16: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(slice_scatter_7, 0, 0, 9223372036854775807)
            select_13: f32[1, 4] = torch.ops.aten.select.int(slice_16, 1, 1)
            select_scatter_7: f32[1, 4] = torch.ops.aten.select_scatter.default(select_13, copy_5, 1, 2); select_13 = copy_5 = None
            select_scatter_8: f32[1, 4, 4] = torch.ops.aten.select_scatter.default(slice_16, select_scatter_7, 1, 1); slice_16 = select_scatter_7 = None
            slice_scatter_8: f32[1, 4, 4] = torch.ops.aten.slice_scatter.default(slice_scatter_7, select_scatter_8, 0, 0, 9223372036854775807); slice_scatter_7 = select_scatter_8 = None
            _tensor_constant0: f32[] = self._tensor_constant0
            lift_fresh_copy: f32[] = torch.ops.aten.lift_fresh_copy.default(_tensor_constant0); _tensor_constant0 = None
            slice_17: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(slice_scatter_8, 0, 0, 9223372036854775807)
            select_14: f32[1, 4] = torch.ops.aten.select.int(slice_17, 1, 3); slice_17 = None
            select_15: f32[1] = torch.ops.aten.select.int(select_14, 1, 2); select_14 = None
            copy_6: f32[1] = torch.ops.aten.copy.default(select_15, lift_fresh_copy); select_15 = lift_fresh_copy = None
            slice_18: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(slice_scatter_8, 0, 0, 9223372036854775807)
            select_16: f32[1, 4] = torch.ops.aten.select.int(slice_18, 1, 3)
            select_scatter_9: f32[1, 4] = torch.ops.aten.select_scatter.default(select_16, copy_6, 1, 2); select_16 = copy_6 = None
            select_scatter_10: f32[1, 4, 4] = torch.ops.aten.select_scatter.default(slice_18, select_scatter_9, 1, 3); slice_18 = select_scatter_9 = None
            slice_scatter_9: f32[1, 4, 4] = torch.ops.aten.slice_scatter.default(slice_scatter_8, select_scatter_10, 0, 0, 9223372036854775807); slice_scatter_8 = select_scatter_10 = None
            _tensor_constant1: f32[] = self._tensor_constant1
            lift_fresh_copy_1: f32[] = torch.ops.aten.lift_fresh_copy.default(_tensor_constant1); _tensor_constant1 = None
            slice_19: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(slice_scatter_9, 0, 0, 9223372036854775807)
            select_17: f32[1, 4] = torch.ops.aten.select.int(slice_19, 1, 2); slice_19 = None
            select_18: f32[1] = torch.ops.aten.select.int(select_17, 1, 3); select_17 = None
            copy_7: f32[1] = torch.ops.aten.copy.default(select_18, lift_fresh_copy_1); select_18 = lift_fresh_copy_1 = None
            slice_20: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(slice_scatter_9, 0, 0, 9223372036854775807)
            select_19: f32[1, 4] = torch.ops.aten.select.int(slice_20, 1, 2)
            select_scatter_11: f32[1, 4] = torch.ops.aten.select_scatter.default(select_19, copy_7, 1, 3); select_19 = copy_7 = None
            select_scatter_12: f32[1, 4, 4] = torch.ops.aten.select_scatter.default(slice_20, select_scatter_11, 1, 2); slice_20 = select_scatter_11 = None
            slice_scatter_10: f32[1, 4, 4] = torch.ops.aten.slice_scatter.default(slice_scatter_9, select_scatter_12, 0, 0, 9223372036854775807); slice_scatter_9 = select_scatter_12 = None
            permute_1: f32[1, 4, 4] = torch.ops.aten.permute.default(slice_scatter_10, [0, 2, 1]); slice_scatter_10 = None
            clone_1: f32[1, 4, 4] = torch.ops.aten.clone.default(permute_1, memory_format = torch.contiguous_format); permute_1 = None
            view_5: f32[1, 4, 4] = torch.ops.aten.view.default(clone_1, [-1, 4, 4]); clone_1 = None
            clone_2: f32[1, 4, 4] = torch.ops.aten.clone.default(clone); clone = None
            arange_6: i64[4] = torch.ops.aten.arange.start_step(0, 4, dtype = torch.int64, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
            arange_7: i64[4] = torch.ops.aten.arange.start_step(0, 4, dtype = torch.int64, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
            unsqueeze_4: i64[4, 1] = torch.ops.aten.unsqueeze.default(arange_6, -1); arange_6 = None
            eq_2: b8[4, 4] = torch.ops.aten.eq.Tensor(unsqueeze_4, arange_7); unsqueeze_4 = arange_7 = None
            full_5: f32[1] = torch.ops.aten.full.default([1], 1, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
            scalar_tensor_2: f32[] = torch.ops.aten.scalar_tensor.default(0, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'))
            where_2: f32[4, 4] = torch.ops.aten.where.self(eq_2, full_5, scalar_tensor_2); eq_2 = full_5 = scalar_tensor_2 = None
            view_6: f32[1, 4, 4] = torch.ops.aten.view.default(where_2, [1, 4, 4]); where_2 = None
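            # linalg_inv_ex inverts the assembled 4x4 camera matrices; the bmm
            # chain further below composes them into a single unprojection
            # transform.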
            linalg_inv_ex = torch.ops.aten.linalg_inv_ex.default(clone_2); clone_2 = None
            getitem_4: f32[1, 4, 4] = linalg_inv_ex[0]; linalg_inv_ex = None
            linalg_inv_ex_1 = torch.ops.aten.linalg_inv_ex.default(view_5); view_5 = None
            getitem_5: f32[1, 4, 4] = linalg_inv_ex_1[0]; linalg_inv_ex_1 = None
            full_6: f32[1, 4, 4] = torch.ops.aten.full.default([1, 4, 4], 1, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
            _tensor_constant2: f32[] = self._tensor_constant2
            lift_fresh_copy_2: f32[] = torch.ops.aten.lift_fresh_copy.default(_tensor_constant2); _tensor_constant2 = None
            select_20: f32[4, 4] = torch.ops.aten.select.int(full_6, 0, 0)
            select_21: f32[4] = torch.ops.aten.select.int(select_20, 0, 3); select_20 = None
            slice_21: f32[3] = torch.ops.aten.slice.Tensor(select_21, 0, 0, 3); select_21 = None
            copy_8: f32[3] = torch.ops.aten.copy.default(slice_21, lift_fresh_copy_2); slice_21 = lift_fresh_copy_2 = None
            select_22: f32[4, 4] = torch.ops.aten.select.int(full_6, 0, 0)
            select_23: f32[4] = torch.ops.aten.select.int(select_22, 0, 3)
            slice_scatter_11: f32[4] = torch.ops.aten.slice_scatter.default(select_23, copy_8, 0, 0, 3); select_23 = copy_8 = None
            select_scatter_13: f32[4, 4] = torch.ops.aten.select_scatter.default(select_22, slice_scatter_11, 0, 3); select_22 = slice_scatter_11 = None
            select_scatter_14: f32[1, 4, 4] = torch.ops.aten.select_scatter.default(full_6, select_scatter_13, 0, 0); full_6 = select_scatter_13 = None
            mul: f32[1, 4, 4] = torch.ops.aten.mul.Tensor(slice_scatter_4, select_scatter_14); slice_scatter_4 = select_scatter_14 = None
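            # The third (depth) channel of the grid points is replaced by its
            # reciprocal and a homogeneous 1 is appended before the batched
            # matrix multiply.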
            slice_22: f32[2, 24, 2] = torch.ops.aten.slice.Tensor(view_2, 2, 0, 2)
            slice_23: f32[2, 24, 1] = torch.ops.aten.slice.Tensor(view_2, 2, 2, 3); view_2 = None
            reciprocal: f32[2, 24, 1] = torch.ops.aten.reciprocal.default(slice_23); slice_23 = None
            mul_1: f32[2, 24, 1] = torch.ops.aten.mul.Tensor(reciprocal, 1.0); reciprocal = None
            cat_2: f32[2, 24, 3] = torch.ops.aten.cat.default([slice_22, mul_1], -1); slice_22 = mul_1 = None
            clone_3: f32[2, 24, 3] = torch.ops.aten.clone.default(cat_2); cat_2 = None
            full_7: f32[2, 24, 1] = torch.ops.aten.full.default([2, 24, 1], 1, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
            cat_3: f32[2, 24, 4] = torch.ops.aten.cat.default([clone_3, full_7], 2); clone_3 = full_7 = None
            clone_4: f32[1, 4, 4] = torch.ops.aten.clone.default(view_6); view_6 = None
            clone_5: f32[1, 4, 4] = torch.ops.aten.clone.default(getitem_5); getitem_5 = None
            bmm: f32[1, 4, 4] = torch.ops.aten.bmm.default(clone_4, clone_5); clone_4 = clone_5 = None
            clone_6: f32[1, 4, 4] = torch.ops.aten.clone.default(mul); mul = None
            bmm_1: f32[1, 4, 4] = torch.ops.aten.bmm.default(bmm, clone_6); bmm = clone_6 = None
            clone_7: f32[1, 4, 4] = torch.ops.aten.clone.default(getitem_4); getitem_4 = None
            bmm_2: f32[1, 4, 4] = torch.ops.aten.bmm.default(bmm_1, clone_7); bmm_1 = clone_7 = None
            expand_3: f32[2, 4, 4] = torch.ops.aten.expand.default(bmm_2, [2, -1, -1]); bmm_2 = None
            bmm_3: f32[2, 24, 4] = torch.ops.aten.bmm.default(cat_3, expand_3); cat_3 = expand_3 = None
            slice_24: f32[2, 24, 1] = torch.ops.aten.slice.Tensor(bmm_3, 2, 3, 9223372036854775807)
            slice_25: f32[2, 24, 3] = torch.ops.aten.slice.Tensor(bmm_3, 2, 0, 3); bmm_3 = None
            div: f32[2, 24, 3] = torch.ops.aten.div.Tensor(slice_25, slice_24); slice_25 = slice_24 = None
            view_7: f32[2, 4, 6, 3] = torch.ops.aten.view.default(div, [2, 4, 6, 3]); div = None
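            # view_7 holds the unprojected 3D points, shape (2, 4, 6, 3).
            # sub = rand - rand below is identically zero, so the add that
            # follows leaves those points unchanged.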
            sub: f32[2] = torch.ops.aten.sub.Tensor(rand, rand); rand = None
            permute_2: f32[2, 4, 6, 3] = torch.ops.aten.permute.default(full_1, [0, 2, 3, 1]); full_1 = None
            view_8: f32[2, 1, 1, 1] = torch.ops.aten.view.default(sub, [-1, 1, 1, 1]); sub = None
            mul_2: f32[2, 4, 6, 3] = torch.ops.aten.mul.Tensor(permute_2, view_8); permute_2 = view_8 = None
            add: f32[2, 4, 6, 3] = torch.ops.aten.add.Tensor(view_7, mul_2); view_7 = mul_2 = None
            view_9: f32[2, 24, 3] = torch.ops.aten.view.default(add, [2, 24, 3]); add = None
            arange_8: i64[4] = torch.ops.aten.arange.start_step(0, 4, dtype = torch.int64, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
            arange_9: i64[4] = torch.ops.aten.arange.start_step(0, 4, dtype = torch.int64, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
            unsqueeze_5: i64[4, 1] = torch.ops.aten.unsqueeze.default(arange_8, -1); arange_8 = None
            eq_3: b8[4, 4] = torch.ops.aten.eq.Tensor(unsqueeze_5, arange_9); unsqueeze_5 = arange_9 = None
            full_8: f32[1] = torch.ops.aten.full.default([1], 1, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
            scalar_tensor_3: f32[] = torch.ops.aten.scalar_tensor.default(0, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'))
            where_3: f32[4, 4] = torch.ops.aten.where.self(eq_3, full_8, scalar_tensor_3); eq_3 = full_8 = scalar_tensor_3 = None
            view_10: f32[1, 4, 4] = torch.ops.aten.view.default(where_3, [1, 4, 4]); where_3 = None
            repeat_2: f32[1, 4, 4] = torch.ops.aten.repeat.default(view_10, [1, 1, 1]); view_10 = None
            slice_26: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(repeat_2, 0, 0, 9223372036854775807)
            slice_27: f32[1, 3, 4] = torch.ops.aten.slice.Tensor(slice_26, 1, 0, 3); slice_26 = None
            slice_28: f32[1, 3, 3] = torch.ops.aten.slice.Tensor(slice_27, 2, 0, 3); slice_27 = None
            copy_9: f32[1, 3, 3] = torch.ops.aten.copy.default(slice_28, arg3_1); slice_28 = arg3_1 = None
            slice_29: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(repeat_2, 0, 0, 9223372036854775807)
            slice_30: f32[1, 3, 4] = torch.ops.aten.slice.Tensor(slice_29, 1, 0, 3)
            slice_scatter_12: f32[1, 3, 4] = torch.ops.aten.slice_scatter.default(slice_30, copy_9, 2, 0, 3); slice_30 = copy_9 = None
            slice_scatter_13: f32[1, 4, 4] = torch.ops.aten.slice_scatter.default(slice_29, slice_scatter_12, 1, 0, 3); slice_29 = slice_scatter_12 = None
            slice_scatter_14: f32[1, 4, 4] = torch.ops.aten.slice_scatter.default(repeat_2, slice_scatter_13, 0, 0, 9223372036854775807); repeat_2 = slice_scatter_13 = None
            arange_10: i64[4] = torch.ops.aten.arange.start_step(0, 4, dtype = torch.int64, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
            arange_11: i64[4] = torch.ops.aten.arange.start_step(0, 4, dtype = torch.int64, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
            unsqueeze_6: i64[4, 1] = torch.ops.aten.unsqueeze.default(arange_10, -1); arange_10 = None
            eq_4: b8[4, 4] = torch.ops.aten.eq.Tensor(unsqueeze_6, arange_11); unsqueeze_6 = arange_11 = None
            full_9: f32[1] = torch.ops.aten.full.default([1], 1, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
            scalar_tensor_4: f32[] = torch.ops.aten.scalar_tensor.default(0, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'))
            where_4: f32[4, 4] = torch.ops.aten.where.self(eq_4, full_9, scalar_tensor_4); eq_4 = full_9 = scalar_tensor_4 = None
            view_11: f32[1, 4, 4] = torch.ops.aten.view.default(where_4, [1, 4, 4]); where_4 = None
            repeat_3: f32[1, 4, 4] = torch.ops.aten.repeat.default(view_11, [1, 1, 1]); view_11 = None
            slice_31: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(repeat_3, 0, 0, 9223372036854775807)
            select_24: f32[1, 4] = torch.ops.aten.select.int(slice_31, 1, 3); slice_31 = None
            slice_32: f32[1, 3] = torch.ops.aten.slice.Tensor(select_24, 1, 0, 3); select_24 = None
            copy_10: f32[1, 3] = torch.ops.aten.copy.default(slice_32, arg4_1); slice_32 = arg4_1 = None
            slice_33: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(repeat_3, 0, 0, 9223372036854775807)
            select_25: f32[1, 4] = torch.ops.aten.select.int(slice_33, 1, 3)
            slice_scatter_15: f32[1, 4] = torch.ops.aten.slice_scatter.default(select_25, copy_10, 1, 0, 3); select_25 = copy_10 = None
            select_scatter_15: f32[1, 4, 4] = torch.ops.aten.select_scatter.default(slice_33, slice_scatter_15, 1, 3); slice_33 = slice_scatter_15 = None
            slice_scatter_16: f32[1, 4, 4] = torch.ops.aten.slice_scatter.default(repeat_3, select_scatter_15, 0, 0, 9223372036854775807); repeat_3 = select_scatter_15 = None
            clone_8: f32[1, 4, 4] = torch.ops.aten.clone.default(slice_scatter_14); slice_scatter_14 = None
            unbind_2 = torch.ops.aten.unbind.int(arg1_1, 1); arg1_1 = None
            getitem_6: f32[1] = unbind_2[0]
            getitem_7: f32[1] = unbind_2[1]; unbind_2 = None
            unbind_3 = torch.ops.aten.unbind.int(arg2_1, 1); arg2_1 = None
            getitem_8: f32[1] = unbind_3[0]
            getitem_9: f32[1] = unbind_3[1]; unbind_3 = None
            full_10: f32[1, 4, 4] = torch.ops.aten.full.default([1, 4, 4], 0, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
            slice_34: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(full_10, 0, 0, 9223372036854775807)
            select_26: f32[1, 4] = torch.ops.aten.select.int(slice_34, 1, 0); slice_34 = None
            select_27: f32[1] = torch.ops.aten.select.int(select_26, 1, 0); select_26 = None
            copy_11: f32[1] = torch.ops.aten.copy.default(select_27, getitem_6); select_27 = getitem_6 = None
            slice_35: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(full_10, 0, 0, 9223372036854775807)
            select_28: f32[1, 4] = torch.ops.aten.select.int(slice_35, 1, 0)
            select_scatter_16: f32[1, 4] = torch.ops.aten.select_scatter.default(select_28, copy_11, 1, 0); select_28 = copy_11 = None
            select_scatter_17: f32[1, 4, 4] = torch.ops.aten.select_scatter.default(slice_35, select_scatter_16, 1, 0); slice_35 = select_scatter_16 = None
            slice_scatter_17: f32[1, 4, 4] = torch.ops.aten.slice_scatter.default(full_10, select_scatter_17, 0, 0, 9223372036854775807); full_10 = select_scatter_17 = None
            slice_36: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(slice_scatter_17, 0, 0, 9223372036854775807)
            select_29: f32[1, 4] = torch.ops.aten.select.int(slice_36, 1, 1); slice_36 = None
            select_30: f32[1] = torch.ops.aten.select.int(select_29, 1, 1); select_29 = None
            copy_12: f32[1] = torch.ops.aten.copy.default(select_30, getitem_7); select_30 = getitem_7 = None
            slice_37: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(slice_scatter_17, 0, 0, 9223372036854775807)
            select_31: f32[1, 4] = torch.ops.aten.select.int(slice_37, 1, 1)
            select_scatter_18: f32[1, 4] = torch.ops.aten.select_scatter.default(select_31, copy_12, 1, 1); select_31 = copy_12 = None
            select_scatter_19: f32[1, 4, 4] = torch.ops.aten.select_scatter.default(slice_37, select_scatter_18, 1, 1); slice_37 = select_scatter_18 = None
            slice_scatter_18: f32[1, 4, 4] = torch.ops.aten.slice_scatter.default(slice_scatter_17, select_scatter_19, 0, 0, 9223372036854775807); slice_scatter_17 = select_scatter_19 = None
            slice_38: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(slice_scatter_18, 0, 0, 9223372036854775807)
            select_32: f32[1, 4] = torch.ops.aten.select.int(slice_38, 1, 0); slice_38 = None
            select_33: f32[1] = torch.ops.aten.select.int(select_32, 1, 2); select_32 = None
            copy_13: f32[1] = torch.ops.aten.copy.default(select_33, getitem_8); select_33 = getitem_8 = None
            slice_39: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(slice_scatter_18, 0, 0, 9223372036854775807)
            select_34: f32[1, 4] = torch.ops.aten.select.int(slice_39, 1, 0)
            select_scatter_20: f32[1, 4] = torch.ops.aten.select_scatter.default(select_34, copy_13, 1, 2); select_34 = copy_13 = None
            select_scatter_21: f32[1, 4, 4] = torch.ops.aten.select_scatter.default(slice_39, select_scatter_20, 1, 0); slice_39 = select_scatter_20 = None
            slice_scatter_19: f32[1, 4, 4] = torch.ops.aten.slice_scatter.default(slice_scatter_18, select_scatter_21, 0, 0, 9223372036854775807); slice_scatter_18 = select_scatter_21 = None
            slice_40: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(slice_scatter_19, 0, 0, 9223372036854775807)
            select_35: f32[1, 4] = torch.ops.aten.select.int(slice_40, 1, 1); slice_40 = None
            select_36: f32[1] = torch.ops.aten.select.int(select_35, 1, 2); select_35 = None
            copy_14: f32[1] = torch.ops.aten.copy.default(select_36, getitem_9); select_36 = getitem_9 = None
            slice_41: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(slice_scatter_19, 0, 0, 9223372036854775807)
            select_37: f32[1, 4] = torch.ops.aten.select.int(slice_41, 1, 1)
            select_scatter_22: f32[1, 4] = torch.ops.aten.select_scatter.default(select_37, copy_14, 1, 2); select_37 = copy_14 = None
            select_scatter_23: f32[1, 4, 4] = torch.ops.aten.select_scatter.default(slice_41, select_scatter_22, 1, 1); slice_41 = select_scatter_22 = None
            slice_scatter_20: f32[1, 4, 4] = torch.ops.aten.slice_scatter.default(slice_scatter_19, select_scatter_23, 0, 0, 9223372036854775807); slice_scatter_19 = select_scatter_23 = None
            _tensor_constant3: f32[] = self._tensor_constant3
            lift_fresh_copy_3: f32[] = torch.ops.aten.lift_fresh_copy.default(_tensor_constant3); _tensor_constant3 = None
            slice_42: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(slice_scatter_20, 0, 0, 9223372036854775807)
            select_38: f32[1, 4] = torch.ops.aten.select.int(slice_42, 1, 3); slice_42 = None
            select_39: f32[1] = torch.ops.aten.select.int(select_38, 1, 2); select_38 = None
            copy_15: f32[1] = torch.ops.aten.copy.default(select_39, lift_fresh_copy_3); select_39 = lift_fresh_copy_3 = None
            slice_43: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(slice_scatter_20, 0, 0, 9223372036854775807)
            select_40: f32[1, 4] = torch.ops.aten.select.int(slice_43, 1, 3)
            select_scatter_24: f32[1, 4] = torch.ops.aten.select_scatter.default(select_40, copy_15, 1, 2); select_40 = copy_15 = None
            select_scatter_25: f32[1, 4, 4] = torch.ops.aten.select_scatter.default(slice_43, select_scatter_24, 1, 3); slice_43 = select_scatter_24 = None
            slice_scatter_21: f32[1, 4, 4] = torch.ops.aten.slice_scatter.default(slice_scatter_20, select_scatter_25, 0, 0, 9223372036854775807); slice_scatter_20 = select_scatter_25 = None
            _tensor_constant4: f32[] = self._tensor_constant4
            lift_fresh_copy_4: f32[] = torch.ops.aten.lift_fresh_copy.default(_tensor_constant4); _tensor_constant4 = None
            slice_44: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(slice_scatter_21, 0, 0, 9223372036854775807)
            select_41: f32[1, 4] = torch.ops.aten.select.int(slice_44, 1, 2); slice_44 = None
            select_42: f32[1] = torch.ops.aten.select.int(select_41, 1, 3); select_41 = None
            copy_16: f32[1] = torch.ops.aten.copy.default(select_42, lift_fresh_copy_4); select_42 = lift_fresh_copy_4 = None
            slice_45: f32[1, 4, 4] = torch.ops.aten.slice.Tensor(slice_scatter_21, 0, 0, 9223372036854775807)
            select_43: f32[1, 4] = torch.ops.aten.select.int(slice_45, 1, 2)
            select_scatter_26: f32[1, 4] = torch.ops.aten.select_scatter.default(select_43, copy_16, 1, 3); select_43 = copy_16 = None
            select_scatter_27: f32[1, 4, 4] = torch.ops.aten.select_scatter.default(slice_45, select_scatter_26, 1, 2); slice_45 = select_scatter_26 = None
            slice_scatter_22: f32[1, 4, 4] = torch.ops.aten.slice_scatter.default(slice_scatter_21, select_scatter_27, 0, 0, 9223372036854775807); slice_scatter_21 = select_scatter_27 = None
            permute_3: f32[1, 4, 4] = torch.ops.aten.permute.default(slice_scatter_22, [0, 2, 1]); slice_scatter_22 = None
            clone_9: f32[1, 4, 4] = torch.ops.aten.clone.default(permute_3, memory_format = torch.contiguous_format); permute_3 = None
            view_12: f32[1, 4, 4] = torch.ops.aten.view.default(clone_9, [-1, 4, 4]); clone_9 = None
            clone_10: f32[1, 4, 4] = torch.ops.aten.clone.default(clone_8); clone_8 = None
            view_13: f32[2, 4, 6, 3] = torch.ops.aten.view.default(view_9, [2, 4, 6, 3]); view_9 = None
            view_14: f32[2, 24, 3] = torch.ops.aten.view.default(view_13, [2, 24, 3]); view_13 = None
            clone_11: f32[2, 24, 3] = torch.ops.aten.clone.default(view_14); view_14 = None
            full_11: f32[2, 24, 1] = torch.ops.aten.full.default([2, 24, 1], 1, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
            cat_4: f32[2, 24, 4] = torch.ops.aten.cat.default([clone_11, full_11], 2); clone_11 = full_11 = None
            clone_12: f32[1, 4, 4] = torch.ops.aten.clone.default(clone_10); clone_10 = None
            clone_13: f32[1, 4, 4] = torch.ops.aten.clone.default(slice_scatter_16); slice_scatter_16 = None
            bmm_4: f32[1, 4, 4] = torch.ops.aten.bmm.default(clone_12, clone_13); clone_12 = clone_13 = None
            clone_14: f32[1, 4, 4] = torch.ops.aten.clone.default(view_12); view_12 = None
            bmm_5: f32[1, 4, 4] = torch.ops.aten.bmm.default(bmm_4, clone_14); bmm_4 = clone_14 = None
            expand_4: f32[2, 4, 4] = torch.ops.aten.expand.default(bmm_5, [2, -1, -1]); bmm_5 = None
            bmm_6: f32[2, 24, 4] = torch.ops.aten.bmm.default(cat_4, expand_4); cat_4 = expand_4 = None
            slice_46: f32[2, 24, 1] = torch.ops.aten.slice.Tensor(bmm_6, 2, 3, 9223372036854775807)
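            # sign/eq/abs/clamp below implement a sign-preserving clamp of the
            # homogeneous w coordinate to magnitude >= 1e-08, guarding the
            # perspective divide against division by zero.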
            sign: f32[2, 24, 1] = torch.ops.aten.sign.default(slice_46)
            eq_5: b8[2, 24, 1] = torch.ops.aten.eq.Scalar(slice_46, 0.0)
            _to_copy: f32[2, 24, 1] = torch.ops.aten._to_copy.default(eq_5, dtype = torch.float32, layout = torch.strided, device = device(type='cpu')); eq_5 = None
            add_1: f32[2, 24, 1] = torch.ops.aten.add.Tensor(sign, _to_copy); sign = _to_copy = None
            abs_1: f32[2, 24, 1] = torch.ops.aten.abs.default(slice_46); slice_46 = None
            clamp: f32[2, 24, 1] = torch.ops.aten.clamp.default(abs_1, 1e-08); abs_1 = None
            mul_3: f32[2, 24, 1] = torch.ops.aten.mul.Tensor(add_1, clamp); add_1 = clamp = None
            slice_47: f32[2, 24, 3] = torch.ops.aten.slice.Tensor(bmm_6, 2, 0, 3); bmm_6 = None
            div_1: f32[2, 24, 3] = torch.ops.aten.div.Tensor(slice_47, mul_3); slice_47 = mul_3 = None
            view_15: f32[2, 4, 6, 3] = torch.ops.aten.view.default(div_1, [2, 4, 6, 3]); div_1 = None
            slice_48: f32[2, 4, 6, 2] = torch.ops.aten.slice.Tensor(view_15, 3, 0, 2); view_15 = None
            arange_12: i64[2] = torch.ops.aten.arange.start_step(0, 2, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
            view_16: i64[2, 1, 1, 1] = torch.ops.aten.view.default(arange_12, [2, 1, 1, 1]); arange_12 = None
            arange_13: i64[3] = torch.ops.aten.arange.start_step(0, 3, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
            view_17: i64[1, 3, 1, 1] = torch.ops.aten.view.default(arange_13, [1, 3, 1, 1]); arange_13 = None
            select_44: f32[2, 4, 6] = torch.ops.aten.select.int(slice_48, 3, 0)
            select_45: f32[2, 4, 6] = torch.ops.aten.select.int(slice_48, 3, 1); slice_48 = None
            mul_4: f32[2, 4, 6] = torch.ops.aten.mul.Tensor(select_44, 3.0); select_44 = None
            add_2: f32[2, 4, 6] = torch.ops.aten.add.Tensor(mul_4, 2.5); mul_4 = None
            clamp_1: f32[2, 4, 6] = torch.ops.aten.clamp.default(add_2, 0, 5); add_2 = None
            mul_5: f32[2, 4, 6] = torch.ops.aten.mul.Tensor(select_45, 2.0); select_45 = None
            add_3: f32[2, 4, 6] = torch.ops.aten.add.Tensor(mul_5, 1.5); mul_5 = None
            clamp_2: f32[2, 4, 6] = torch.ops.aten.clamp.default(add_3, 0, 3); add_3 = None
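            # floor/sub/mul below compute the four bilinear interpolation
            # weights; the four index/mul/add blocks that follow gather the
            # corner pixels of arg5_1 and blend them (consistent with a
            # decomposed grid_sample).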
            floor: f32[2, 4, 6] = torch.ops.aten.floor.default(clamp_1)
            floor_1: f32[2, 4, 6] = torch.ops.aten.floor.default(clamp_2)
            add_4: f32[2, 4, 6] = torch.ops.aten.add.Tensor(floor, 1)
            add_5: f32[2, 4, 6] = torch.ops.aten.add.Tensor(floor_1, 1)
            sub_1: f32[2, 4, 6] = torch.ops.aten.sub.Tensor(add_4, clamp_1)
            sub_2: f32[2, 4, 6] = torch.ops.aten.sub.Tensor(add_5, clamp_2)
            mul_6: f32[2, 4, 6] = torch.ops.aten.mul.Tensor(sub_1, sub_2); sub_1 = sub_2 = None
            sub_3: f32[2, 4, 6] = torch.ops.aten.sub.Tensor(clamp_1, floor)
            sub_4: f32[2, 4, 6] = torch.ops.aten.sub.Tensor(add_5, clamp_2)
            mul_7: f32[2, 4, 6] = torch.ops.aten.mul.Tensor(sub_3, sub_4); sub_3 = sub_4 = None
            sub_5: f32[2, 4, 6] = torch.ops.aten.sub.Tensor(add_4, clamp_1)
            sub_6: f32[2, 4, 6] = torch.ops.aten.sub.Tensor(clamp_2, floor_1)
            mul_8: f32[2, 4, 6] = torch.ops.aten.mul.Tensor(sub_5, sub_6); sub_5 = sub_6 = None
            sub_7: f32[2, 4, 6] = torch.ops.aten.sub.Tensor(clamp_1, floor); clamp_1 = None
            sub_8: f32[2, 4, 6] = torch.ops.aten.sub.Tensor(clamp_2, floor_1); clamp_2 = None
            mul_9: f32[2, 4, 6] = torch.ops.aten.mul.Tensor(sub_7, sub_8); sub_7 = sub_8 = None
            ge: b8[2, 4, 6] = torch.ops.aten.ge.Scalar(floor, 0)
            lt: b8[2, 4, 6] = torch.ops.aten.lt.Scalar(floor, 6)
            ge_1: b8[2, 4, 6] = torch.ops.aten.ge.Scalar(floor_1, 0)
            lt_1: b8[2, 4, 6] = torch.ops.aten.lt.Scalar(floor_1, 4)
            logical_and: b8[2, 4, 6] = torch.ops.aten.logical_and.default(ge_1, lt_1); ge_1 = lt_1 = None
            logical_and_1: b8[2, 4, 6] = torch.ops.aten.logical_and.default(lt, logical_and); lt = logical_and = None
            logical_and_2: b8[2, 4, 6] = torch.ops.aten.logical_and.default(ge, logical_and_1); ge = logical_and_1 = None
            _to_copy_1: i64[2, 4, 6] = torch.ops.aten._to_copy.default(floor, dtype = torch.int64)
            _to_copy_2: i64[2, 4, 6] = torch.ops.aten._to_copy.default(floor_1, dtype = torch.int64)
            scalar_tensor_5: i64[] = torch.ops.aten.scalar_tensor.default(0, dtype = torch.int64, layout = torch.strided, device = device(type='cpu'))
            where_5: i64[2, 4, 6] = torch.ops.aten.where.self(logical_and_2, _to_copy_1, scalar_tensor_5); _to_copy_1 = scalar_tensor_5 = None
            view_18: i64[2, 1, 4, 6] = torch.ops.aten.view.default(where_5, [2, 1, 4, 6]); where_5 = None
            scalar_tensor_6: i64[] = torch.ops.aten.scalar_tensor.default(0, dtype = torch.int64, layout = torch.strided, device = device(type='cpu'))
            where_6: i64[2, 4, 6] = torch.ops.aten.where.self(logical_and_2, _to_copy_2, scalar_tensor_6); _to_copy_2 = scalar_tensor_6 = None
            view_19: i64[2, 1, 4, 6] = torch.ops.aten.view.default(where_6, [2, 1, 4, 6]); where_6 = None
            scalar_tensor_7: f32[] = torch.ops.aten.scalar_tensor.default(0, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'))
            where_7: f32[2, 4, 6] = torch.ops.aten.where.self(logical_and_2, mul_6, scalar_tensor_7); logical_and_2 = mul_6 = scalar_tensor_7 = None
            view_20: f32[2, 1, 4, 6] = torch.ops.aten.view.default(where_7, [2, 1, 4, 6]); where_7 = None
            index: f32[2, 3, 4, 6] = torch.ops.aten.index.Tensor(arg5_1, [view_16, view_17, view_19, view_18]); view_19 = view_18 = None
            mul_10: f32[2, 3, 4, 6] = torch.ops.aten.mul.Tensor(index, view_20); index = view_20 = None
            ge_2: b8[2, 4, 6] = torch.ops.aten.ge.Scalar(add_4, 0)
            lt_2: b8[2, 4, 6] = torch.ops.aten.lt.Scalar(add_4, 6)
            ge_3: b8[2, 4, 6] = torch.ops.aten.ge.Scalar(floor_1, 0)
            lt_3: b8[2, 4, 6] = torch.ops.aten.lt.Scalar(floor_1, 4)
            logical_and_3: b8[2, 4, 6] = torch.ops.aten.logical_and.default(ge_3, lt_3); ge_3 = lt_3 = None
            logical_and_4: b8[2, 4, 6] = torch.ops.aten.logical_and.default(lt_2, logical_and_3); lt_2 = logical_and_3 = None
            logical_and_5: b8[2, 4, 6] = torch.ops.aten.logical_and.default(ge_2, logical_and_4); ge_2 = logical_and_4 = None
            _to_copy_3: i64[2, 4, 6] = torch.ops.aten._to_copy.default(add_4, dtype = torch.int64)
            _to_copy_4: i64[2, 4, 6] = torch.ops.aten._to_copy.default(floor_1, dtype = torch.int64); floor_1 = None
            scalar_tensor_8: i64[] = torch.ops.aten.scalar_tensor.default(0, dtype = torch.int64, layout = torch.strided, device = device(type='cpu'))
            where_8: i64[2, 4, 6] = torch.ops.aten.where.self(logical_and_5, _to_copy_3, scalar_tensor_8); _to_copy_3 = scalar_tensor_8 = None
            view_21: i64[2, 1, 4, 6] = torch.ops.aten.view.default(where_8, [2, 1, 4, 6]); where_8 = None
            scalar_tensor_9: i64[] = torch.ops.aten.scalar_tensor.default(0, dtype = torch.int64, layout = torch.strided, device = device(type='cpu'))
            where_9: i64[2, 4, 6] = torch.ops.aten.where.self(logical_and_5, _to_copy_4, scalar_tensor_9); _to_copy_4 = scalar_tensor_9 = None
            view_22: i64[2, 1, 4, 6] = torch.ops.aten.view.default(where_9, [2, 1, 4, 6]); where_9 = None
            scalar_tensor_10: f32[] = torch.ops.aten.scalar_tensor.default(0, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'))
            where_10: f32[2, 4, 6] = torch.ops.aten.where.self(logical_and_5, mul_7, scalar_tensor_10); logical_and_5 = mul_7 = scalar_tensor_10 = None
            view_23: f32[2, 1, 4, 6] = torch.ops.aten.view.default(where_10, [2, 1, 4, 6]); where_10 = None
            index_1: f32[2, 3, 4, 6] = torch.ops.aten.index.Tensor(arg5_1, [view_16, view_17, view_22, view_21]); view_22 = view_21 = None
            mul_11: f32[2, 3, 4, 6] = torch.ops.aten.mul.Tensor(index_1, view_23); index_1 = view_23 = None
            add_6: f32[2, 3, 4, 6] = torch.ops.aten.add.Tensor(mul_10, mul_11); mul_10 = mul_11 = None
            ge_4: b8[2, 4, 6] = torch.ops.aten.ge.Scalar(floor, 0)
            lt_4: b8[2, 4, 6] = torch.ops.aten.lt.Scalar(floor, 6)
            ge_5: b8[2, 4, 6] = torch.ops.aten.ge.Scalar(add_5, 0)
            lt_5: b8[2, 4, 6] = torch.ops.aten.lt.Scalar(add_5, 4)
            logical_and_6: b8[2, 4, 6] = torch.ops.aten.logical_and.default(ge_5, lt_5); ge_5 = lt_5 = None
            logical_and_7: b8[2, 4, 6] = torch.ops.aten.logical_and.default(lt_4, logical_and_6); lt_4 = logical_and_6 = None
            logical_and_8: b8[2, 4, 6] = torch.ops.aten.logical_and.default(ge_4, logical_and_7); ge_4 = logical_and_7 = None
            _to_copy_5: i64[2, 4, 6] = torch.ops.aten._to_copy.default(floor, dtype = torch.int64); floor = None
            _to_copy_6: i64[2, 4, 6] = torch.ops.aten._to_copy.default(add_5, dtype = torch.int64)
            scalar_tensor_11: i64[] = torch.ops.aten.scalar_tensor.default(0, dtype = torch.int64, layout = torch.strided, device = device(type='cpu'))
            where_11: i64[2, 4, 6] = torch.ops.aten.where.self(logical_and_8, _to_copy_5, scalar_tensor_11); _to_copy_5 = scalar_tensor_11 = None
            view_24: i64[2, 1, 4, 6] = torch.ops.aten.view.default(where_11, [2, 1, 4, 6]); where_11 = None
            scalar_tensor_12: i64[] = torch.ops.aten.scalar_tensor.default(0, dtype = torch.int64, layout = torch.strided, device = device(type='cpu'))
            where_12: i64[2, 4, 6] = torch.ops.aten.where.self(logical_and_8, _to_copy_6, scalar_tensor_12); _to_copy_6 = scalar_tensor_12 = None
            view_25: i64[2, 1, 4, 6] = torch.ops.aten.view.default(where_12, [2, 1, 4, 6]); where_12 = None
            scalar_tensor_13: f32[] = torch.ops.aten.scalar_tensor.default(0, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'))
            where_13: f32[2, 4, 6] = torch.ops.aten.where.self(logical_and_8, mul_8, scalar_tensor_13); logical_and_8 = mul_8 = scalar_tensor_13 = None
            view_26: f32[2, 1, 4, 6] = torch.ops.aten.view.default(where_13, [2, 1, 4, 6]); where_13 = None
            index_2: f32[2, 3, 4, 6] = torch.ops.aten.index.Tensor(arg5_1, [view_16, view_17, view_25, view_24]); view_25 = view_24 = None
            mul_12: f32[2, 3, 4, 6] = torch.ops.aten.mul.Tensor(index_2, view_26); index_2 = view_26 = None
            add_7: f32[2, 3, 4, 6] = torch.ops.aten.add.Tensor(add_6, mul_12); add_6 = mul_12 = None
            ge_6: b8[2, 4, 6] = torch.ops.aten.ge.Scalar(add_4, 0)
            lt_6: b8[2, 4, 6] = torch.ops.aten.lt.Scalar(add_4, 6)
            ge_7: b8[2, 4, 6] = torch.ops.aten.ge.Scalar(add_5, 0)
            lt_7: b8[2, 4, 6] = torch.ops.aten.lt.Scalar(add_5, 4)
            logical_and_9: b8[2, 4, 6] = torch.ops.aten.logical_and.default(ge_7, lt_7); ge_7 = lt_7 = None
            logical_and_10: b8[2, 4, 6] = torch.ops.aten.logical_and.default(lt_6, logical_and_9); lt_6 = logical_and_9 = None
            logical_and_11: b8[2, 4, 6] = torch.ops.aten.logical_and.default(ge_6, logical_and_10); ge_6 = logical_and_10 = None
            _to_copy_7: i64[2, 4, 6] = torch.ops.aten._to_copy.default(add_4, dtype = torch.int64); add_4 = None
            _to_copy_8: i64[2, 4, 6] = torch.ops.aten._to_copy.default(add_5, dtype = torch.int64); add_5 = None
            scalar_tensor_14: i64[] = torch.ops.aten.scalar_tensor.default(0, dtype = torch.int64, layout = torch.strided, device = device(type='cpu'))
            where_14: i64[2, 4, 6] = torch.ops.aten.where.self(logical_and_11, _to_copy_7, scalar_tensor_14); _to_copy_7 = scalar_tensor_14 = None
            view_27: i64[2, 1, 4, 6] = torch.ops.aten.view.default(where_14, [2, 1, 4, 6]); where_14 = None
            scalar_tensor_15: i64[] = torch.ops.aten.scalar_tensor.default(0, dtype = torch.int64, layout = torch.strided, device = device(type='cpu'))
            where_15: i64[2, 4, 6] = torch.ops.aten.where.self(logical_and_11, _to_copy_8, scalar_tensor_15); _to_copy_8 = scalar_tensor_15 = None
            view_28: i64[2, 1, 4, 6] = torch.ops.aten.view.default(where_15, [2, 1, 4, 6]); where_15 = None
            scalar_tensor_16: f32[] = torch.ops.aten.scalar_tensor.default(0, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'))
            where_16: f32[2, 4, 6] = torch.ops.aten.where.self(logical_and_11, mul_9, scalar_tensor_16); logical_and_11 = mul_9 = scalar_tensor_16 = None
            view_29: f32[2, 1, 4, 6] = torch.ops.aten.view.default(where_16, [2, 1, 4, 6]); where_16 = None
            index_3: f32[2, 3, 4, 6] = torch.ops.aten.index.Tensor(arg5_1, [view_16, view_17, view_28, view_27]); arg5_1 = view_16 = view_17 = view_28 = view_27 = None
            mul_13: f32[2, 3, 4, 6] = torch.ops.aten.mul.Tensor(index_3, view_29); index_3 = view_29 = None
            add_8: f32[2, 3, 4, 6] = torch.ops.aten.add.Tensor(add_7, mul_13); add_7 = mul_13 = None
            return (add_8,)
Graph Signature: ExportGraphSignature(parameters=[], buffers=['L__self___mask', 'L__self___camera_focal_length', 'L__self___camera_principal_point', 'L__self___camera_R', 'L__self___camera_T'], user_inputs=['arg5_1'], user_outputs=['add_8'], inputs_to_parameters={}, inputs_to_buffers={'arg0_1': 'L__self___mask', 'arg1_1': 'L__self___camera_focal_length', 'arg2_1': 'L__self___camera_principal_point', 'arg3_1': 'L__self___camera_R', 'arg4_1': 'L__self___camera_T'}, buffers_to_mutate={}, backward_signature=None, assertion_dep_token=None)
Symbol to range: {}
=========================== short test summary info ============================
FAILED torchworld/transforms/test_sfm.py::TestSFM::test_export - AssertionErr...
======================= 1 failed, 1 deselected in 4.51s ======================== |
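
Note: the empty "Symbol to range" map above indicates the program was exported with fully static shapes. For reference, below is a minimal sketch of the kind of test that produces this output. Only torch.export.export and the unittest scaffolding are real APIs here; this MyModel is a hypothetical stand-in for the real module in torchworld/transforms/test_sfm.py (which is not included in this gist), with buffers named to mirror the graph signature above. The closing self.fail() is deliberate: failing the test makes pytest print the captured stdout, i.e. the ExportedProgram.

import unittest

import torch
from torch.export import export


class MyModel(torch.nn.Module):
    # Hypothetical stand-in: buffers mirror the graph signature above.
    def __init__(self) -> None:
        super().__init__()
        self.register_buffer("mask", torch.ones(2, 1, 4, 6))
        self.register_buffer("camera_focal_length", torch.rand(1, 2))
        self.register_buffer("camera_principal_point", torch.rand(1, 2))
        self.register_buffer("camera_R", torch.eye(3).unsqueeze(0))
        self.register_buffer("camera_T", torch.zeros(1, 3))

    def forward(self, img: torch.Tensor) -> torch.Tensor:
        # Placeholder body; the real model unprojects a pixel grid through
        # the camera and bilinearly resamples img, as the aten graph shows.
        return img


class TestSFM(unittest.TestCase):
    def test_export(self) -> None:
        data = torch.ones(2, 3, 4, 6)
        model = MyModel()
        model(data)  # eager warm-up call, as in the log above
        exported = export(model, args=(data,))
        self.assertIsNotNone(exported)
        print(exported)  # prints the ExportedProgram shown above
        self.fail()  # deliberate failure so pytest shows captured stdout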