From 667dfcbc5aec07b76c8c2a7c9a312f8c18a65655 Mon Sep 17 00:00:00 2001
From: Aart Bik
Date: Mon, 13 May 2024 15:34:26 -0700
Subject: [PATCH] [torch-mlir][sparse] enable test on ReLu (#3336)

Downstream MLIR sparsifier has some (rudimentary) support for ReLU
now, and this test can now be enabled with correct end-to-end
behavior.

Also see discussion at:
https://discourse.llvm.org/t/min-max-abs-relu-recognition-starter-project/78918
---
 test/python/fx_importer/sparse_test.py | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/test/python/fx_importer/sparse_test.py b/test/python/fx_importer/sparse_test.py
index 474fe2bfddbc..9184dc4dc99f 100644
--- a/test/python/fx_importer/sparse_test.py
+++ b/test/python/fx_importer/sparse_test.py
@@ -459,6 +459,11 @@ def forward(self, x):
 # CHECK:        values=tensor([   0.,    0.,    1.,    2.,    3., 1000.]),
 # CHECK:        size=(10, 20, 30), nnz=6, dtype=torch.float64, layout=torch.sparse_coo)
 # CHECK: torch.mlir
+# CHECK:   [0 6]
+# CHECK:   [0 1 1 4 9 9]
+# CHECK:   [ 0  1  1  5 19 19]
+# CHECK:   [ 0  1  3  6 28 29]
+# CHECK:   [   0.    0.    1.    2.    3. 1000.]
 #
 def test_sparse_coo3():
     class COO3Net(torch.nn.Module):
@@ -481,11 +486,15 @@ def forward(self, x):
 
     # Run it with PyTorch torch.sparse and with TORCH-MLIR sparse_jit.
     res1 = net(sparse_input)
-    # TODO: make coo3 work
-    # res2 = sparse_jit(net, sparse_input)
+    res2 = sparse_jit(net, sparse_input)
     print("torch.sparse")
     print(res1)
     print("torch.mlir")
+    print(res2[0])
+    print(res2[1])
+    print(res2[2])
+    print(res2[3])
+    print(res2[4])
 
 
 @run
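
Note: for a quick local sanity check of the behavior this test relies on, a minimal
sketch in plain PyTorch follows. The indices, values, and shape are illustrative only
(not the data used in sparse_test.py), and the repo's sparse_jit harness is not
reproduced here, so only the torch.sparse side of the comparison is shown.

import torch

# Hypothetical COO data; the real test defines its own tensor in sparse_test.py.
indices = torch.tensor([[0, 1, 9], [0, 1, 19], [0, 3, 29]])
values = torch.tensor([-2.0, 3.0, 1000.0], dtype=torch.float64)
sparse_input = torch.sparse_coo_tensor(indices, values, size=(10, 20, 30))

# ReLU on a sparse COO tensor clamps the stored values elementwise; as the
# CHECK lines in the patch show (nnz=6 with explicit 0. values), clamped
# entries stay stored rather than being pruned from the tensor.
res = torch.relu(sparse_input)
print(res)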