diff --git a/test/regression/aarch64/instructions/sme.cc b/test/regression/aarch64/instructions/sme.cc
index d908d13a1..75be221ae 100644
--- a/test/regression/aarch64/instructions/sme.cc
+++ b/test/regression/aarch64/instructions/sme.cc
@@ -37,7 +37,7 @@ TEST_P(InstSme, add) {
   )");
   const uint16_t zaStride = (SVL / 8) / 2;
   const uint16_t zaHalfIndex = 2;
-  for (uint64_t i = 0; i < (SVL / 8); i++) {
+  for (uint16_t i = 0; i < (SVL / 8); i++) {
     if (i == zaHalfIndex) {
       CHECK_MAT_ROW(AARCH64_REG_ZA, i, uint32_t,
                     fillNeon<uint32_t>({104}, (SVL / 8)));
@@ -279,7 +279,7 @@ TEST_P(InstSme, fadd) {
   )");
   const uint16_t zaStride = (SVL / 8) / 2;
   const uint16_t zaHalfIndex = 2;
-  for (uint64_t i = 0; i < (SVL / 8); i++) {
+  for (uint16_t i = 0; i < (SVL / 8); i++) {
     if (i == zaHalfIndex) {
       CHECK_MAT_ROW(AARCH64_REG_ZA, i, float,
                     fillNeon<float>({21.5f}, (SVL / 8)));
@@ -330,7 +330,7 @@ TEST_P(InstSme, fadd) {
     fadd za.d[w8, #1, vgx2], {z4.d, z5.d}
   )");
 
-  for (uint64_t i = 0; i < (SVL / 8); i++) {
+  for (uint16_t i = 0; i < (SVL / 8); i++) {
     if (i == zaHalfIndex) {
       CHECK_MAT_ROW(AARCH64_REG_ZA, i, double,
                     fillNeon<double>({21.5}, (SVL / 8)));
@@ -382,7 +382,7 @@ TEST_P(InstSme, fmla_multiVecs) {
   )");
   const uint16_t zaStride = (SVL / 8) / 4;
   const uint16_t zaQuartIndex = 2;
-  for (uint64_t i = 0; i < (SVL / 8); i++) {
+  for (uint16_t i = 0; i < (SVL / 8); i++) {
     // Effected rows all use same zm value of 2.0f
     if (i == zaQuartIndex) {
       CHECK_MAT_ROW(AARCH64_REG_ZA, i, float,
@@ -441,7 +441,7 @@ TEST_P(InstSme, fmla_multiVecs) {
     fmla za.d[w8, #1, vgx4], {z4.d - z7.d}, {z8.d - z11.d}
   )");
 
-  for (uint64_t i = 0; i < (SVL / 8); i++) {
+  for (uint16_t i = 0; i < (SVL / 8); i++) {
     // Effected rows all use same zm value of 2.0
     if (i == zaQuartIndex) {
       CHECK_MAT_ROW(AARCH64_REG_ZA, i, double,
@@ -501,7 +501,7 @@ TEST_P(InstSme, fmla_indexed_vgx4) {
   )");
   const uint16_t zaStride = (SVL / 8) / 4;
   const uint16_t zaQuartIndex = 2;
-  for (uint64_t i = 0; i < (SVL / 8); i++) {
+  for (uint16_t i = 0; i < (SVL / 8); i++) {
     // Effected rows all use same zm value of 2.0f
     if (i == zaQuartIndex) {
       CHECK_MAT_ROW(AARCH64_REG_ZA, i, float,
@@ -561,7 +561,7 @@ TEST_P(InstSme, fmla_indexed_vgx4) {
     fmla za.d[w8, #1, vgx4], {z4.d - z7.d}, z10.d[0]
   )");
 
-  for (uint64_t i = 0; i < (SVL / 8); i++) {
+  for (uint16_t i = 0; i < (SVL / 8); i++) {
     // Effected rows all use same zm value of 2.0f
     if (i == zaQuartIndex) {
       CHECK_MAT_ROW(AARCH64_REG_ZA, i, double,
@@ -605,7 +605,7 @@ TEST_P(InstSme, fmopa) {
     fmopa za2.s, p0/m, p2/m, z3.s, z4.s
   )");
 
-  for (uint64_t i = 0; i < (SVL / 32); i++) {
+  for (uint16_t i = 0; i < (SVL / 32); i++) {
     CHECK_MAT_ROW(AARCH64_REG_ZAS0, i, float,
                   fillNeon<float>({10.0f}, (SVL / 8)));
     CHECK_MAT_ROW(AARCH64_REG_ZAS2, i, float,
@@ -633,7 +633,7 @@ TEST_P(InstSme, fmopa) {
     fmopa za2.d, p0/m, p2/m, z3.d, z4.d
   )");
 
-  for (uint64_t i = 0; i < (SVL / 64); i++) {
+  for (uint16_t i = 0; i < (SVL / 64); i++) {
     CHECK_MAT_ROW(AARCH64_REG_ZAD0, i, double,
                   fillNeon<double>({10.0}, (SVL / 8)));
     CHECK_MAT_ROW(AARCH64_REG_ZAD2, i, double,
@@ -850,7 +850,7 @@ TEST_P(InstSme, st1d) {
     st1d {za0h.d[w12, 0]}, p0, [sp, x1, lsl #3]
     st1d {za1h.d[w12, 1]}, p0, [x4]
   )");
-  for (uint64_t i = 0; i < (SVL / 64); i++) {
+  for (uint16_t i = 0; i < (SVL / 64); i++) {
     EXPECT_EQ(getMemoryValue<uint64_t>(process_->getInitialStackPointer() -
                                        4095 + (i * 8)),
               src[i % 2]);
@@ -880,7 +880,7 @@ TEST_P(InstSme, st1d) {
     ld1d {za1h.d[w13, 1]}, p1/z, [x0, x3, lsl #3]
     st1d {za1h.d[w13, 1]}, p1, [x5, x3, lsl #3]
   )");
-  for (uint64_t i = 0; i < (SVL / 128); i++) {
+  for (uint16_t i = 0; i < (SVL / 128); i++) {
     EXPECT_EQ(getMemoryValue<uint64_t>(800 + (i * 8)), src[i % 2]);
     EXPECT_EQ(getMemoryValue<uint64_t>(800 + 16 + (i * 8)), src[i % 2]);
   }
@@ -911,7 +911,7 @@ TEST_P(InstSme, st1d) {
     st1d {za0v.d[w12, 0]}, p0, [sp, x1, lsl #3]
     st1d {za1v.d[w12, 1]}, p0, [x4]
   )");
-  for (uint64_t i = 0; i < (SVL / 64); i++) {
+  for (uint16_t i = 0; i < (SVL / 64); i++) {
     EXPECT_EQ(getMemoryValue<uint64_t>(process_->getInitialStackPointer() -
                                        4095 + (i * 8)),
               src_vert[i % 2]);
@@ -941,7 +941,7 @@ TEST_P(InstSme, st1d) {
     ld1d {za1v.d[w13, 1]}, p1/z, [x0, x3, lsl #3]
     st1d {za1v.d[w13, 1]}, p1, [x5, x3, lsl #3]
   )");
-  for (uint64_t i = 0; i < (SVL / 128); i++) {
+  for (uint16_t i = 0; i < (SVL / 128); i++) {
     EXPECT_EQ(getMemoryValue<uint64_t>(800 + (i * 8)), src_vert[i % 2]);
     EXPECT_EQ(getMemoryValue<uint64_t>(800 + 16 + (i * 8)), src_vert[i % 2]);
   }
@@ -974,7 +974,7 @@ TEST_P(InstSme, st1w) {
     st1w {za0h.s[w12, 0]}, p0, [sp, x1, lsl #2]
     st1w {za1h.s[w12, 1]}, p0, [x4]
   )");
-  for (uint64_t i = 0; i < (SVL / 32); i++) {
+  for (uint16_t i = 0; i < (SVL / 32); i++) {
     EXPECT_EQ(getMemoryValue<uint32_t>(process_->getInitialStackPointer() -
                                        4095 + (i * 4)),
               src[i % 4]);
@@ -1003,7 +1003,7 @@ TEST_P(InstSme, st1w) {
     ld1w {za1h.s[w12, 2]}, p1/z, [x0, x3, lsl #2]
     st1w {za1h.s[w12, 2]}, p1, [x5, x3, lsl #2]
   )");
-  for (uint64_t i = 0; i < (SVL / 64); i++) {
+  for (uint16_t i = 0; i < (SVL / 64); i++) {
     EXPECT_EQ(getMemoryValue<uint32_t>(800 + (i * 4)), src[i % 4]);
     EXPECT_EQ(getMemoryValue<uint32_t>(800 + 16 + (i * 4)), src[i % 4]);
   }
@@ -1035,7 +1035,7 @@ TEST_P(InstSme, st1w) {
     st1w {za0v.s[w12, 0]}, p0, [sp, x1, lsl #2]
     st1w {za1v.s[w12, 1]}, p0, [x4]
   )");
-  for (uint64_t i = 0; i < (SVL / 32); i++) {
+  for (uint16_t i = 0; i < (SVL / 32); i++) {
     EXPECT_EQ(getMemoryValue<uint32_t>(process_->getInitialStackPointer() -
                                        4095 + (i * 4)),
               src_vert[i % 4]);
@@ -1064,7 +1064,7 @@ TEST_P(InstSme, st1w) {
     ld1w {za1v.s[w12, 2]}, p1/z, [x0, x3, lsl #2]
     st1w {za1v.s[w12, 2]}, p1, [x5, x3, lsl #2]
   )");
-  for (uint64_t i = 0; i < (SVL / 64); i++) {
+  for (uint16_t i = 0; i < (SVL / 64); i++) {
     EXPECT_EQ(getMemoryValue<uint32_t>(800 + (i * 4)), src_vert[i % 4]);
     EXPECT_EQ(getMemoryValue<uint32_t>(800 + 16 + (i * 4)), src_vert[i % 4]);
   }
@@ -1109,7 +1109,7 @@ TEST_P(InstSme, udot_Indexed_vgx4) {
   )");
   const uint16_t zaStride = (SVL / 8) / 4;
   const uint16_t zaQuartIndex = 2;
-  for (uint64_t i = 0; i < (SVL / 8); i++) {
+  for (uint16_t i = 0; i < (SVL / 8); i++) {
     // Effected rows all use same zm values of {8, 9, 10, 11}
     if (i == zaQuartIndex) {
       CHECK_MAT_ROW(AARCH64_REG_ZA, i, uint32_t,
@@ -1173,7 +1173,7 @@ TEST_P(InstSme, udot_vgx4) {
   )");
   const uint16_t zaStride = (SVL / 8) / 4;
   const uint16_t zaQuartIndex = 2;
-  for (uint64_t i = 0; i < (SVL / 8); i++) {
+  for (uint16_t i = 0; i < (SVL / 8); i++) {
     if (i == zaQuartIndex) {
       CHECK_MAT_ROW(AARCH64_REG_ZA, i, uint32_t,
                     fillNeon<uint32_t>({156, 316, 476, 636}, (SVL / 8)));
@@ -1233,7 +1233,7 @@ TEST_P(InstSme, uvdot_indexed_vgx4) {
   )");
   const uint16_t zaStride = (SVL / 8) / 4;
   const uint16_t zaQuartIndex = 2;
-  for (uint64_t i = 0; i < (SVL / 8); i++) {
+  for (uint16_t i = 0; i < (SVL / 8); i++) {
     // Effected rows all use same zm values of {8, 9, 10, 11}
     if (i == zaQuartIndex) {
       CHECK_MAT_ROW(AARCH64_REG_ZA, i, uint32_t,
@@ -1279,7 +1279,7 @@ TEST_P(InstSme, umopa) {
     umopa za2.s, p0/m, p2/m, z3.b, z4.b
   )");
 
-  for (uint64_t i = 0; i < (SVL / 32); i++) {
+  for (uint16_t i = 0; i < (SVL / 32); i++) {
     CHECK_MAT_ROW(AARCH64_REG_ZAS0, i, uint32_t,
                   fillNeon<uint32_t>({96}, (SVL / 8)));
     CHECK_MAT_ROW(AARCH64_REG_ZAS2, i, uint32_t,
@@ -1302,7 +1302,7 @@ TEST_P(InstSme, zero) {
     zero {za}
   )");
 
-  for (uint64_t i = 0; i < (SVL / 8); i++) {
+  for (uint16_t i = 0; i < (SVL / 8); i++) {
     CHECK_MAT_ROW(AARCH64_REG_ZA, i, uint64_t,
                   fillNeon<uint64_t>({0}, SVL / 8));
   }
@@ -1339,7 +1339,7 @@ TEST_P(InstSme, zero) {
     zero {za0.s, za2.s}
   )");
 
-  for (uint64_t i = 0; i < (SVL / 32); i++) {
+  for (uint16_t i = 0; i < (SVL / 32); i++) {
     CHECK_MAT_ROW(AARCH64_REG_ZAS0, i, uint32_t,
                   fillNeon<uint32_t>({0}, SVL / 8));
     CHECK_MAT_ROW(AARCH64_REG_ZAS2, i, uint32_t,
diff --git a/test/regression/aarch64/instructions/sve.cc b/test/regression/aarch64/instructions/sve.cc
index f9699593f..9411ef008 100644
--- a/test/regression/aarch64/instructions/sve.cc
+++ b/test/regression/aarch64/instructions/sve.cc
@@ -2884,7 +2884,7 @@ TEST_P(InstSve, faddv) {
   )");
   float s3 = 0.0f;
   float s4 = 0.0f;
-  for (int i = 0; i < VL / 32; i++) {
+  for (uint64_t i = 0; i < VL / 32; i++) {
     s3 += fsrc[i % (fsrc.size())];
     if (i < (VL / 64)) s4 += fsrc[i % (fsrc.size())];
   }
@@ -2922,7 +2922,7 @@ TEST_P(InstSve, faddv) {
   )");
   double d3 = 0.0;
   double d4 = 0.0;
-  for (int i = 0; i < VL / 64; i++) {
+  for (uint64_t i = 0; i < (VL / 64); i++) {
     d3 += dsrc[i % (dsrc.size())];
     if (i < (VL / 128)) d4 += dsrc[i % (dsrc.size())];
   }
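Reviewer note on why the counter types change, with a minimal standalone sketch. The `SVL`/`VL` constants and `main` below are hypothetical scaffolding; in the real tests both values come from the parameterised fixture. In sme.cc the loops compare `i` against `uint16_t` constants such as `zaHalfIndex`, so a `uint16_t` counter keeps the comparison within one type; in sve.cc the old `int` counter was signed while the `VL / 32` bound is unsigned, the mismatch that `-Wsign-compare` reports when enabled.

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for the fixture's vector lengths in bits; the real
// tests read SVL and VL from the parameterised test environment.
constexpr uint64_t SVL = 512;
constexpr uint64_t VL = 128;

int main() {
  // sme.cc pattern: the row-index constants are uint16_t, so a uint16_t
  // counter keeps the i == zaHalfIndex comparison within a single type.
  const uint16_t zaHalfIndex = 2;
  for (uint16_t i = 0; i < (SVL / 8); i++) {
    if (i == zaHalfIndex) {
      std::printf("row %d would hold the accumulated values\n", i);
    }
  }

  // sve.cc pattern: the previous 'int i' was a signed counter compared
  // against the unsigned bound VL / 32, which -Wsign-compare flags; an
  // unsigned counter removes the mismatch.
  float s3 = 0.0f;
  for (uint64_t i = 0; i < VL / 32; i++) {
    s3 += 1.0f;  // stands in for fsrc[i % fsrc.size()]
  }
  std::printf("s3 = %.1f\n", s3);
  return 0;
}
```

Both GCC and Clang report the signed/unsigned comparison once the warning is enabled, so the sve.cc loops would break a warnings-as-errors build; the sme.cc changes are the same consistency fix applied to width rather than signedness.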