Test Issues #13
Open
GregorySchwing opened this issue Oct 14, 2023 · 0 comments

After fixing the build issues with #12, two tests failed:

root@51e1485d94b7:/TorchVision/numml# pytest numml/tests
============================================================= test session starts ==============================================================
platform linux -- Python 3.10.12, pytest-7.4.2, pluggy-1.3.0
rootdir: /TorchVision/numml
plugins: shard-0.1.2, hypothesis-5.35.1, rerunfailures-12.0, xdist-3.3.1, xdoctest-1.0.2, flakefinder-1.1.0
collected 33 items
Running 33 items in this shard

numml/tests/test_add.py ... [ 9%]
numml/tests/test_misc.py . [ 12%]
numml/tests/test_spdmm.py .FF.. [ 27%]
numml/tests/test_spmm.py ....... [ 48%]
numml/tests/test_spmv.py ...... [ 66%]
numml/tests/test_sptrsv.py ...... [ 84%]
numml/tests/test_transpose.py ..... [100%]

=================================================================== FAILURES ===================================================================
______________________________________________________________ test_random_small _______________________________________________________________

def test_random_small():
    it = 10
    for i in range(it):
        Nc = random.randint(3, 10)
        X = torch.randn(A_N, Nc)
        X_c = X.to(gpu)
        print('X_c shape', X_c.shape)
        print(X_c)

        AX_d = A_d @ X
>       assert(torch.allclose(AX_d, A@X))

E AssertionError: assert False
E + where False = <built-in method allclose of type object at 0x7facd8b4f680>(tensor([[-8.0905e-01, -1.2791e+00, 2.1114e+00, -2.1343e+00, -5.8658e+00],\n [ 2.3513e+00, 1.4423e+00, -2.9569e...00, -1.0391e+00, -5.9623e-01, -4.8469e+00],\n [-2.3855e+00, -1.8825e+00, -4.6459e+00, -8.6935e-01, 3.7740e+00]]), (<16x16 sparse matrix tensor of type 'torch.float32'\n with 46 stored elements in Compressed Sparse Row format> @ tensor([[ 2.3843e-01, -1.6153e-01, 5.9662e-01, -1.7112e+00, -2.1326e+00],\n [ 1.2859e+00, 9.5606e-01, -9.1817e...02, -1.1889e+00, 5.0320e-02, -1.0626e+00],\n [-6.7612e-01, -9.1202e-01, -2.9174e+00, -4.0951e-01, 1.3557e+00]])))
E + where <built-in method allclose of type object at 0x7facd8b4f680> = torch.allclose

numml/tests/test_spdmm.py:43: AssertionError
------------------------------------------------------------- Captured stdout call -------------------------------------------------------------
X_c shape torch.Size([16, 7])
tensor([[-1.4404, 0.9939, -0.2333, -2.4501, -2.3983, -1.0288, -1.5292],
[-1.1281, -1.9627, -1.0358, 1.0002, 1.2289, 0.0903, 0.7665],
[-0.8329, 1.7655, -1.1728, -0.1793, -1.4545, 0.0141, -1.2997],
[-1.7013, -0.9780, 0.5817, -1.3643, 1.5334, 0.6137, 0.5878],
[ 0.4688, 0.1218, 0.8664, -1.0398, -0.2259, 2.2077, -0.3796],
[-1.3176, -0.0595, 2.0575, 1.5325, -0.7838, -0.1799, 0.7953],
[ 0.2436, -2.7121, 0.0796, -0.2203, 0.2659, 0.9575, -0.4840],
[ 0.5368, 1.4393, -0.4030, -0.5211, -0.5826, -1.3434, -1.5291],
[-0.0376, 0.3784, 1.1600, 1.3821, 1.3911, -1.4542, -1.5568],
[ 1.1818, 0.9652, 1.0323, 0.5053, -0.0812, -1.3966, 0.6781],
[-1.1266, -0.4934, 1.1943, 0.5738, -1.2757, 0.7131, 1.4122],
[-0.6217, -0.2607, -1.1545, -0.5109, 1.4249, -0.0774, -1.4336],
[ 0.0060, -1.3877, 0.3932, -0.8707, -0.5830, 0.7535, 0.9527],
[ 0.1212, -0.0844, -0.8639, 0.1214, 0.6723, 1.7152, -0.2713],
[-1.3067, 0.8132, 0.4800, -2.3894, 0.9714, 0.4336, -1.1133],
[ 0.7836, 0.4225, 0.0109, -1.2518, -1.2735, -0.3628, -0.5556]],
device='cuda:0')
X_c shape torch.Size([16, 6])
tensor([[-1.6990, 0.4994, 0.5237, 0.5681, -0.4384, 1.2278],
[-0.5838, 0.1167, 0.7284, 0.8329, 0.0827, -0.0480],
[ 0.2050, 1.3393, -1.5127, -0.9296, -1.1537, 1.8951],
[-0.7593, -0.6198, -1.6280, -1.8682, -1.8920, 0.1182],
[ 1.7719, -0.4381, -1.1646, -1.5857, -2.2401, 1.6927],
[ 1.2371, -0.7035, -0.5878, -0.5940, 1.0570, -0.1962],
[-1.1086, -2.3059, 0.3850, -0.6142, 1.1206, 0.3247],
[ 0.2323, -0.7529, -0.6670, -0.0404, 1.4693, -0.9065],
[-1.6904, -0.4596, -0.3784, 2.1711, 0.8126, 2.7665],
[-1.9220, 0.6336, -0.8892, -0.5160, 1.7726, 0.6870],
[-1.9859, 0.8231, -1.2517, -0.6015, 0.6485, 0.5330],
[-0.7359, -0.4978, -0.1557, 0.4194, -2.9327, -1.5208],
[-0.6135, -0.6400, -1.1061, -1.6915, -0.3025, 0.9853],
[-1.4310, 1.7253, -0.6249, -0.2569, 1.1259, 1.0723],
[ 0.5355, -0.5477, -0.3024, -0.2601, 0.0102, -0.1565],
[ 0.0215, -0.2547, -0.2166, -0.5851, 0.4905, 0.5024]],
device='cuda:0')
X_c shape torch.Size([16, 4])
tensor([[ 0.1297, -0.6206, -1.3438, -0.2378],
[ 0.3542, -2.1727, -0.7693, 0.1832],
[-2.0531, -0.2708, 0.4542, -0.7819],
[-0.6752, -0.1414, 0.8351, -0.5958],
[ 0.4637, -0.6149, 1.0920, -0.4849],
[ 0.0196, 0.5081, -1.0010, -0.2156],
[ 0.3359, 0.7699, -2.2112, -1.8252],
[ 0.6903, 1.0862, -1.0192, 1.3107],
[-0.2100, -1.2446, -0.4514, 0.6777],
[ 0.8924, -1.0165, 0.4438, -0.0431],
[ 1.4194, -0.1714, -0.0895, 0.0612],
[ 0.1829, 0.6200, 0.8062, -0.8499],
[-0.5414, -1.6655, 0.6044, 0.9376],
[ 0.2957, 0.7887, 0.4509, 0.1071],
[ 0.0645, -0.5269, -1.9421, -1.1922],
[ 1.1843, -1.8400, -0.0630, -0.2972]], device='cuda:0')
X_c shape torch.Size([16, 7])
tensor([[ 1.4878e+00, -4.0182e-01, 2.2026e+00, -5.4703e-01, -1.5376e+00,
-1.0124e+00, -1.7798e-01],
[ 3.1039e+00, 1.3340e+00, -6.4154e-01, 2.7985e+00, -1.0064e+00,
-1.2514e-01, -8.9587e-01],
[ 6.9080e-01, -5.0479e-03, -5.4363e-01, -1.3511e+00, 2.3356e-01,
-4.4866e-01, -2.3332e+00],
[ 3.6111e-01, -5.2257e-01, -8.4437e-01, 1.4156e+00, -2.1662e-01,
-6.6325e-01, -1.4181e-01],
[ 7.6379e-01, -1.5159e+00, 2.3365e-01, 2.8765e-01, 1.8937e+00,
-1.7489e-01, -5.9196e-01],
[ 6.6749e-01, 1.1846e+00, 5.3885e-01, 1.5514e+00, 1.6079e+00,
1.3231e+00, -6.3911e-01],
[-5.4540e-02, 4.1947e-01, -1.6453e-01, 2.9159e+00, -8.3123e-01,
-1.0161e+00, 7.0982e-01],
[-4.6030e-01, 1.4809e+00, 1.4565e+00, -1.4879e+00, -1.6303e-02,
1.0789e+00, 4.5324e-01],
[-6.6465e-01, -5.4234e-01, -8.2486e-01, 4.7613e-01, -4.2572e-01,
3.7492e-01, 1.8957e-01],
[ 2.1584e-01, -1.6270e+00, -4.7006e-02, -2.4820e+00, -3.4601e-01,
-1.2760e-01, -4.9856e-01],
[-1.7065e+00, 4.8403e-01, 5.1602e-01, -9.9702e-01, 7.8479e-01,
1.3271e+00, -3.4873e-01],
[ 1.2887e+00, -7.6163e-01, -5.9705e-01, -2.8159e-02, -1.8242e-01,
1.0442e+00, -3.6987e-01],
[-3.2847e-01, 7.2676e-01, -1.1154e+00, -2.7572e-01, 1.0034e+00,
-3.6583e-01, -6.5653e-01],
[ 1.1268e+00, -2.1012e-01, 9.4497e-01, 1.4431e-03, 1.4943e-01,
-2.2855e+00, 1.1309e+00],
[ 8.3730e-01, 8.8567e-01, 2.4612e+00, 2.3620e-01, 3.0178e-01,
3.9953e-01, 7.6512e-01],
[ 1.2079e+00, -7.4040e-01, -1.3897e-01, 1.8495e+00, -1.0368e+00,
6.5823e-01, 1.1544e+00]], device='cuda:0')
X_c shape torch.Size([16, 9])
tensor([[ 1.1608e+00, 5.2404e-01, -9.3447e-01, 8.4869e-01, -1.6773e-01,
7.4402e-01, 3.3067e-01, 1.3007e+00, -9.4865e-02],
[ 5.6259e-01, 5.4271e-01, 1.9070e+00, 9.6501e-01, -2.9819e-01,
1.2864e+00, 1.5435e+00, -6.1125e-01, -5.1652e-01],
[-1.0947e+00, 3.3532e-02, 1.6763e+00, -1.9187e-01, 8.5340e-01,
-2.9716e-01, -6.9661e-01, -1.4154e+00, 4.9596e-01],
[-1.5290e+00, 4.0677e-01, 1.1593e+00, 6.7598e-01, 3.6662e-01,
-7.1687e-01, -5.2561e-01, 9.8510e-01, -1.2586e+00],
[-6.7048e-01, 3.8925e-01, 3.8757e-01, 3.9510e-01, -2.3837e-01,
-3.9225e-01, -2.2230e-01, 3.3597e-01, 7.5240e-01],
[-7.9070e-02, 4.8587e-01, -3.1904e-01, -1.8768e+00, -1.2998e-01,
-1.8312e+00, 8.6905e-01, 1.4286e+00, 4.3833e-01],
[-7.1978e-01, 4.6438e-01, 2.9762e+00, 6.1793e-01, 5.5364e-01,
-7.6758e-01, -3.9675e-02, 2.0812e+00, -6.5035e-01],
[ 1.4078e+00, 5.3098e-01, 1.7682e+00, 1.2678e+00, 2.0861e-01,
2.8300e+00, -4.1974e-01, -1.3530e+00, -2.2785e+00],
[-1.8561e+00, -1.2427e-01, 9.4302e-01, 2.0212e+00, 5.6076e-01,
-8.7159e-01, -2.2218e-01, -8.7846e-01, 9.1332e-01],
[ 6.2230e-02, -2.7110e+00, -9.0537e-01, -7.0086e-01, -4.0505e-01,
-1.0166e+00, 1.7999e+00, 9.1718e-01, 1.1454e+00],
[ 1.9940e+00, -1.5937e+00, -1.3100e+00, -1.5429e-01, 1.4527e+00,
-2.9646e-01, -6.5418e-01, -1.5161e+00, -5.7280e-01],
[-2.1334e-01, -2.4639e+00, 2.1572e+00, 8.8547e-01, -9.6068e-01,
-7.8268e-01, -3.3942e-01, -1.7174e-01, -2.7654e-01],
[ 1.2832e-01, -5.5500e-01, 1.1688e-01, 7.3154e-01, -1.5545e+00,
-6.0215e-01, -1.7040e-01, -4.1701e-02, 5.9348e-01],
[ 1.0569e+00, -6.8000e-01, -1.0715e+00, -1.2657e+00, -6.0673e-01,
7.7856e-01, -2.0067e-03, 2.1625e-01, -3.8065e-01],
[ 4.5311e-01, 7.3426e-01, -3.1287e+00, -4.6920e-01, -6.8173e-01,
-5.4592e-01, -2.2556e+00, -1.1776e+00, -2.0990e-01],
[ 6.1662e-01, 1.3936e+00, -7.9541e-01, 1.5003e+00, -3.3568e-01,
5.5757e-01, -6.6173e-01, 1.5195e+00, -1.0046e+00]], device='cuda:0')
X_c shape torch.Size([16, 10])
tensor([[ 1.2644, 0.9153, -1.4336, 0.7042, 0.6132, 0.2158, 0.2887, 0.5955,
-1.4903, 0.4799],
[ 0.4747, -0.5038, 0.2039, 0.4999, 1.4249, -0.6385, -0.1321, 0.0411,
-0.0987, 0.5058],
[-0.2237, 1.0426, 1.3616, 2.1435, -0.2582, -0.1537, 0.6851, -0.1747,
2.4723, 0.8146],
[ 1.1464, 0.1538, 0.2178, -2.1835, -0.5254, 0.5341, 0.0711, 1.0723,
1.5233, 0.8579],
[-0.2565, -0.6761, 1.3049, -1.2890, -0.9296, -0.3167, -1.4590, -0.3813,
0.6864, 0.9067],
[-0.0045, -0.6211, -1.3761, -1.8368, -0.7799, -1.4577, 1.2145, -0.4969,
0.8128, -0.0879],
[-0.2819, 1.1318, 0.5851, 1.5346, -0.5031, 1.0811, 1.4598, -0.2922,
-1.0698, -0.6239],
[ 1.5007, 0.4776, 0.5756, -0.1036, 0.3009, -1.4226, 0.1117, 0.0685,
1.4614, -0.6837],
[ 0.2659, -0.1216, 0.4524, -0.8880, -0.0903, 0.0514, -0.3307, 1.3727,
0.4569, 0.1330],
[-1.3085, 0.3482, 0.8168, 1.4885, -1.0427, 0.9636, -0.2865, -0.1337,
0.4671, -0.9516],
[ 0.3257, 0.4804, 0.8782, -2.1633, 2.6182, -1.2561, 1.5588, 0.4776,
-1.4705, -1.6446],
[ 0.0566, 0.8201, -0.8358, 1.4604, -0.6825, 0.1172, -0.0529, -1.3245,
1.3474, 0.5735],
[ 0.8320, -0.2725, -0.8190, 0.2139, -0.6147, 1.8202, 0.8190, -0.1298,
0.7880, 0.4006],
[-0.2897, 0.3501, -0.0573, -1.8452, -1.0914, 0.7466, 1.7042, 0.8599,
0.2791, 0.3213],
[-0.8833, 1.0887, -1.6795, 0.2387, 1.3289, 1.0220, -0.4792, -0.3981,
-0.2759, -0.3885],
[ 0.3195, 1.4533, -0.5149, -0.4989, 0.0384, 0.1927, 1.1188, -1.7946,
0.1168, 0.0319]], device='cuda:0')
X_c shape torch.Size([16, 9])
tensor([[-0.1261, 1.1235, -1.0696, -1.1387, 0.2743, -0.5753, 0.5699, 0.5149,
-1.5009],
[-0.0163, 0.8872, 0.9338, 0.0028, -0.7828, -1.7374, 0.0482, 0.6348,
-0.6524],
[-2.3185, -1.5195, 1.6479, 0.4001, 1.1418, 1.3328, 0.3003, 1.3518,
0.4094],
[-1.4756, -1.3459, -0.3559, 2.3344, 0.4912, -0.2207, -0.5194, 1.3649,
-0.1092],
[ 2.0059, -0.7026, -1.1630, 0.3063, -0.3568, -0.4022, -0.1154, -2.0210,
-0.2269],
[ 0.1390, 0.1577, -0.4332, 1.3558, 1.3769, 0.7643, 0.7959, 0.1422,
0.6731],
[-1.1134, -1.7257, -0.6762, 0.2199, -0.7438, -1.0302, -1.3823, 1.9751,
-0.2361],
[-2.2063, 0.1420, -2.1714, 0.4063, 0.1646, 0.2098, 0.7066, -0.1464,
-0.3691],
[-0.2976, -2.1344, 0.8700, 0.5639, 1.2127, -1.7652, -1.5514, 1.2440,
-0.0477],
[ 1.1270, -2.5411, -2.1104, 0.6838, 0.5407, 0.3054, 0.0783, -0.6014,
-1.3387],
[-0.9373, 1.9748, -0.3357, 1.2090, -1.0583, -0.2297, -0.8720, 0.2073,
0.0692],
[ 0.4263, -0.9635, 0.3399, -0.1582, 1.5743, -1.2042, -1.2970, 1.5115,
-0.8326],
[-1.3463, -1.0039, -1.3930, 1.9752, 0.1632, -1.1564, -1.1395, 1.0007,
-0.0174],
[-0.7713, 1.0199, -0.1383, 0.6839, -1.4275, -1.4894, 0.5242, 1.1992,
-1.1920],
[ 1.3805, 0.6728, -0.1094, 1.4066, 0.8237, 0.3745, -1.4426, 2.3637,
-1.2894],
[ 0.7094, -0.9870, -0.8011, -1.8140, -0.1459, -0.4534, -1.2952, -0.4278,
0.9144]], device='cuda:0')
X_c shape torch.Size([16, 5])
tensor([[ 2.3843e-01, -1.6153e-01, 5.9662e-01, -1.7112e+00, -2.1326e+00],
[ 1.2859e+00, 9.5606e-01, -9.1817e-01, -1.2881e+00, 1.6006e+00],
[-1.7942e-02, 6.3140e-01, 5.2398e-01, -2.7630e-01, 1.5381e+00],
[-5.1545e-01, 6.3432e-01, -6.3058e-01, 1.4679e-02, 1.4887e+00],
[-1.0812e+00, -5.2346e-02, 2.2321e-01, 8.6249e-02, 7.4195e-01],
[ 3.1881e-01, 5.8811e-01, 1.0562e+00, 1.3342e-01, 2.9426e-01],
[-1.1309e+00, 2.4535e+00, -5.7642e-01, 2.8675e-01, -1.9409e-01],
[-1.0309e+00, -5.4439e-01, -7.7303e-01, 3.2667e-02, 1.8129e+00],
[-6.7964e-04, -8.9906e-01, 2.0343e+00, 3.6000e-01, -2.7563e-01],
[-3.0619e-01, 1.5841e+00, 1.5015e+00, -8.4094e-01, 3.2306e-01],
[-2.0362e-01, -5.3436e-01, 9.7085e-01, 9.9474e-01, -3.6796e-01],
[-8.1160e-01, 2.9902e+00, 1.3559e-01, -5.7836e-01, -7.6623e-01],
[ 1.2366e+00, 8.4008e-01, 3.7793e-01, 4.3765e-01, -3.7793e-01],
[-9.6175e-01, -2.6040e-01, 1.5787e+00, 1.1064e+00, 1.3660e+00],
[ 1.0333e+00, 5.8460e-02, -1.1889e+00, 5.0320e-02, -1.0626e+00],
[-6.7612e-01, -9.1202e-01, -2.9174e+00, -4.0951e-01, 1.3557e+00]],
device='cuda:0')
______________________________________________________________ test_random_large _______________________________________________________________

def test_random_large():
    it = 5
    for i in range(it):
        Nc = random.randint(3, 6)
        X = torch.randn(AL_N, Nc)
        X_c = X.to(gpu)

        AX_d = AL_d @ X
>       assert(torch.allclose(AX_d, AL@X))

E AssertionError: assert False
E + where False = <built-in method allclose of type object at 0x7facd8b4f680>(tensor([[-3.1022, -0.1757, 0.2265, 2.6405, 2.6282],\n [ 6.8254, 1.6969, -2.2275, -5.1129, -1.2788],\n ...4.5011],\n [-0.8021, -0.4414, 1.2198, -0.4238, -4.7221],\n [ 3.6725, -0.1307, -2.1412, -0.1562, 3.5673]]), (<2048x2048 sparse matrix tensor of type 'torch.float32'\n with 6142 stored elements in Compressed Sparse Row format> @ tensor([[-0.4234, 0.3763, -0.2115, -0.0504, 1.1204],\n [ 2.2554, 0.9283, -0.6494, -2.7413, -0.3875],\n ...1.1468],\n [ 1.0123, -0.3853, 0.2250, -0.2176, -1.1945],\n [ 2.3424, -0.2580, -0.9581, -0.1869, 1.1864]])))
E + where <built-in method allclose of type object at 0x7facd8b4f680> = torch.allclose

numml/tests/test_spdmm.py:55: AssertionError
=========================================================== short test summary info ============================================================
FAILED numml/tests/test_spdmm.py::test_random_small - AssertionError: assert False
FAILED numml/tests/test_spdmm.py::test_random_large - AssertionError: assert False
======================================================== 2 failed, 31 passed in 12.38s =========================================================
root@51e1485d94b7:/TorchVision/numml#
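
For what it's worth, here is a minimal, self-contained sketch of the check the failing assertion performs, using plain torch CSR tensors instead of the numml fixtures (the sizes, density, and tolerances below are made up for illustration). Looking at the size of the difference, rather than the bare allclose() boolean, separates ordinary float32 round-off (roughly 1e-6 for problems this size) from a genuinely wrong result:

import torch

torch.manual_seed(0)

# Hypothetical stand-ins for the test fixtures: a ~20%-dense random matrix and a
# random dense right-hand side, sized to mirror test_random_small.
N, Nc = 16, 5
A_dense = torch.randn(N, N) * (torch.rand(N, N) < 0.2)
A_csr = A_dense.to_sparse_csr()
X = torch.randn(N, Nc)

AX_ref = A_dense @ X   # dense reference product
AX_csr = A_csr @ X     # CSR sparse-dense product

diff = (AX_csr - AX_ref).abs()
print('max abs diff:', diff.max().item())   # pure round-off stays around 1e-6

# A tolerance-only failure would pass with slightly looser bounds; a kernel that
# produces values that are off by a large margin would still fail.
print(torch.allclose(AX_csr, AX_ref, rtol=1e-4, atol=1e-6))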
