[riscv-tests] Updated tests for single lane
sharafat-10xEngineers committed Dec 23, 2022
1 parent 0c18343 commit 588578e
Showing 178 changed files with 38,409 additions and 0 deletions.
40 changes: 40 additions & 0 deletions apps/riscv-tests/isa/rv64uv/1_lane_tests/Makefrag
@@ -0,0 +1,40 @@
# Copyright 2021 ETH Zurich and University of Bologna.
# Solderpad Hardware License, Version 0.51, see LICENSE for details.
# SPDX-License-Identifier: SHL-0.51
#
# Author: Matheus Cavalcante <matheusd@iis.ee.ethz.ch>
#         Basile Bougenot <bbougenot@student.ethz.ch>

rv64uv_sc_tests = vaadd vaaddu vsadd vsaddu vsmul vssra vssrl vnclip vnclipu vadd \
vsub vrsub vwaddu vwsubu vwadd vwsub vsext vzext vadc vmadc vsbc vmsbc vand vor \
vxor vsll vsrl vsra vnsrl vnsra vmseq vmsne vmsltu vmslt vmsleu vmsle vmsgtu \
vmsgt vminu vmin vmaxu vmax vmul vmulh vmulhu vmulhsu vdivu vdiv vremu \
vrem vwmul vwmulu vwmulsu vmacc vnmsac vmadd vnmsub vwmaccu vwmacc \
vwmaccsu vwmaccus vmerge vmv vmvxs vmvsx vfmvfs vfmvsf vmvnrr \
vredsum vredmaxu vredmax vredminu vredmin vredand vredor \
vredxor vwredsumu vwredsum vfadd vfsub vfrsub vfwadd \
vfwsub vfmul vfdiv vfrdiv vfwmul vfmacc vfnmacc vfmsac \
vfnmsac vfmadd vfnmadd vfmsub vfnmsub vfwmacc \
vfwnmacc vfwmsac vfwnmsac vfsqrt vfmin vfmax vfredusum vfredosum vfredmin vfredmax \
vfwredusum vfwredosum vfclass vfsgnj vfsgnjn vfsgnjx vfmerge \
vfmv vmfeq vmfne vmflt vmfle vmfgt vmfge vfcvt vfwcvt vfncvt \
vmand vmnand vmandnot vmor vmnor vmornot vmxor vmxnor vslideup vslidedown \
vslide1up vfslide1up vslide1down vfslide1down vl \
vl1r vle1 vls vluxei vs \
vs1r vse1 vss vsuxei vsetivli vsetvli \
vsetvl vmsbf vmsof vmsif viota vid vcpop vfirst vle8 \
vse8 vle16 vse16 vle32 vse32 vle64 vse64

#rv64uv_sc_tests = vaadd vaaddu vadc vasub vasubu vcompress vfirst vid viota \
vl vlff vl_nocheck vlx vmsbf vmsif vmsof vpopc_m vrgather vsadd vsaddu \
vsetvl vsetivli vsetvli vsmul vssra vssrl vssub vssubu vsux vsx

rv64uv_p_tests = $(addprefix rv64uv-p-,$(rv64uv_sc_tests))

spike_ctests += $(rv64uv_p_tests)
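Here $(addprefix rv64uv-p-,$(rv64uv_sc_tests)) prepends rv64uv-p- to every name in the list, so vaadd becomes the build target rv64uv-p-vaadd; the resulting targets are appended to spike_ctests, presumably the set of C tests the build runs on Spike.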
59 changes: 59 additions & 0 deletions apps/riscv-tests/isa/rv64uv/1_lane_tests/vaadd.c
@@ -0,0 +1,59 @@
// Copyright 2021 ETH Zurich and University of Bologna.
// Solderpad Hardware License, Version 0.51, see LICENSE for details.
// SPDX-License-Identifier: SHL-0.51
//
// Author: Matheus Cavalcante <[email protected]>
// Basile Bougenot <[email protected]>

#include "vector_macros.h"

void TEST_CASE1(void) {
set_vxrm(0); // setting vxrm to rnu rounding mode
VSET(4, e8, m1);
VLOAD_8(v1, 1, -2, -3, 4);
VLOAD_8(v2, 1, 2, -3, 3);
__asm__ volatile("vaadd.vv v3, v1, v2" ::);
VCMP_U8(1, v3, 1, 0, -3, 4);
}

void TEST_CASE2(void) {
set_vxrm(1); // setting vxrm to rne rounding mode
VSET(4, e8, m1);
VLOAD_8(v1, 1, -2, -3, 4);
VLOAD_8(v2, 1, 9, -3, 5);
VLOAD_8(v0, 0xA, 0x0, 0x0, 0x0);
VCLEAR(v3);
__asm__ volatile("vaadd.vv v3, v1, v2, v0.t" ::);
VCMP_U8(2, v3, 0, 4, 0, 4);
}

void TEST_CASE3(void) {
set_vxrm(2); // setting vxrm to rdn rounding mode
VSET(4, e32, m1);
VLOAD_32(v1, 1, -2, 3, -4);
const uint32_t scalar = 5;
__asm__ volatile("vaadd.vx v3, v1, %[A]" ::[A] "r"(scalar));
VCMP_U32(3, v3, 3, 1, 4, 0);
}

// Don't use VCLEAR here: it triggers a glitch where all values are off by 1
void TEST_CASE4(void) {
set_vxrm(3); // setting vxrm to rod rounding mode
VSET(4, e32, m1);
VLOAD_32(v1, 1, 2, 3, 4);
const uint32_t scalar = 5;
VLOAD_32(v0, 0xA, 0x0, 0x0, 0x0);
VCLEAR(v3);
__asm__ volatile("vaadd.vx v3, v1, %[A], v0.t" ::[A] "r"(scalar));
VCMP_U32(4, v3, 0, 3, 0, 5);
}

int main(void) {
INIT_CHECK();
enable_vec();
TEST_CASE1();
TEST_CASE2();
TEST_CASE3();
TEST_CASE4();
EXIT_CHECK();
}
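The expected values in these cases follow the RVV fixed-point rounding rules selected by vxrm: vaadd computes roundoff_signed(vs2 + vs1, 1), i.e. the sum shifted right by one plus a rounding increment. A minimal scalar sketch of that computation, assuming a helper named avg_ref (illustrative, not part of the test suite):

#include <stdint.h>

// Scalar model of vaadd per the RVV spec's roundoff_signed(a + b, 1):
// shift the sum right by one and add a rounding increment r chosen by vxrm.
static int8_t avg_ref(int8_t a, int8_t b, unsigned vxrm) {
  int16_t sum = (int16_t)a + (int16_t)b;
  int lsb = sum & 1;  // the bit shifted out by >> 1
  int r;
  switch (vxrm) {
  case 0: r = lsb; break;                       // rnu: round-to-nearest-up
  case 1: r = lsb & ((sum >> 1) & 1); break;    // rne: round-to-nearest-even
  case 2: r = 0; break;                         // rdn: round-down (truncate)
  default: r = lsb & !((sum >> 1) & 1); break;  // rod: round-to-odd
  }
  return (int8_t)((sum >> 1) + r);
}

Under rnu, avg_ref(4, 3, 0) gives (7 >> 1) + 1 = 4, matching the last element checked in TEST_CASE1.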
59 changes: 59 additions & 0 deletions apps/riscv-tests/isa/rv64uv/1_lane_tests/vaaddu.c
@@ -0,0 +1,59 @@
// Copyright 2021 ETH Zurich and University of Bologna.
// Solderpad Hardware License, Version 0.51, see LICENSE for details.
// SPDX-License-Identifier: SHL-0.51
//
// Author: Matheus Cavalcante <[email protected]>
// Basile Bougenot <[email protected]>

#include "vector_macros.h"

void TEST_CASE1(void) {
set_vxrm(0); // setting vxrm to rnu rounding mode
VSET(4, e8, m1);
VLOAD_8(v1, 1, 2, 3, 5);
VLOAD_8(v2, 1, 3, 8, 4);
__asm__ volatile("vaaddu.vv v3, v1, v2" ::);
VCMP_U8(1, v3, 1, 3, 6, 5);
}

void TEST_CASE2(void) {
set_vxrm(1); // setting vxrm to rne rounding mode
VSET(4, e8, m1);
VLOAD_8(v1, 5, 8, 3, 7);
VLOAD_8(v2, 7, 5, 3, 5);
VLOAD_8(v0, 0x0A, 0x00, 0x00, 0x00);
VCLEAR(v3);
__asm__ volatile("vaaddu.vv v3, v1, v2, v0.t" ::);
VCMP_U8(2, v3, 0, 6, 0, 6);
}

void TEST_CASE3(void) {
set_vxrm(2); // setting vxrm to rdn rounding mode
VSET(4, e32, m1);
VLOAD_32(v1, 1, 2, 3, 4);
const uint32_t scalar = 5;
__asm__ volatile("vaaddu.vx v3, v1, %[A]" ::[A] "r"(scalar));
VCMP_U32(3, v3, 3, 3, 4, 4);
}

// Don't use VCLEAR here: it triggers a glitch where all values are off by 1
void TEST_CASE4(void) {
set_vxrm(3); // setting vxrm to rod rounding mode
VSET(4, e32, m1);
VLOAD_32(v1, 1, 2, 3, 4);
const uint32_t scalar = 5;
VLOAD_32(v0, 0xA, 0x0, 0x0, 0x0);
VCLEAR(v3);
__asm__ volatile("vaaddu.vx v3, v1, %[A], v0.t" ::[A] "r"(scalar));
VCMP_U32(4, v3, 0, 3, 0, 5);
}

int main(void) {
INIT_CHECK();
enable_vec();
TEST_CASE1();
TEST_CASE2();
TEST_CASE3();
TEST_CASE4();
EXIT_CHECK();
}
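vaaddu is the unsigned counterpart (roundoff_unsigned in the spec); a sketch under the same assumptions as avg_ref above, with the same illustrative naming:

#include <stdint.h>

// Unsigned counterpart of avg_ref: the same vxrm rounding increment,
// applied to an unsigned sum with a logical right shift.
static uint8_t avgu_ref(uint8_t a, uint8_t b, unsigned vxrm) {
  uint16_t sum = (uint16_t)a + (uint16_t)b;
  int lsb = sum & 1;
  int r;
  switch (vxrm) {
  case 0: r = lsb; break;                       // rnu
  case 1: r = lsb & ((sum >> 1) & 1); break;    // rne
  case 2: r = 0; break;                         // rdn
  default: r = lsb & !((sum >> 1) & 1); break;  // rod
  }
  return (uint8_t)((sum >> 1) + r);
}

For example, avgu_ref(3, 8, 0) = (11 >> 1) + 1 = 6, matching the third element checked in TEST_CASE1.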
103 changes: 103 additions & 0 deletions apps/riscv-tests/isa/rv64uv/1_lane_tests/vadc.c
@@ -0,0 +1,103 @@
// Copyright 2021 ETH Zurich and University of Bologna.
// Solderpad Hardware License, Version 0.51, see LICENSE for details.
// SPDX-License-Identifier: SHL-0.51
//
// Author: Matheus Cavalcante <[email protected]>
// Basile Bougenot <[email protected]>

#include "vector_macros.h"

void TEST_CASE1(void) {
VSET(16, e8, m1);
VLOAD_8(v1, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, -7, 7);
VLOAD_8(v2, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, -8);
VLOAD_8(v0, 0xAA, 0xAA);
asm volatile("vadc.vvm v3, v1, v2, v0");
VCMP_U8(1, v3, 9, 10, 9, 10, 9, 10, 9, 10, 2, 5, 6, 9, 10, 13, 0, 0);

VSET(16, e16, m2);
VLOAD_16(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, -7, 7);
VLOAD_16(v4, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, -8);
VLOAD_8(v0, 0xAA, 0xAA);
asm volatile("vadc.vvm v6, v2, v4, v0");
VCMP_U16(2, v6, 9, 10, 9, 10, 9, 10, 9, 10, 2, 5, 6, 9, 10, 13, 0, 0);

VSET(16, e32, m4);
VLOAD_32(v4, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, -7, 7);
VLOAD_32(v8, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, -8);
VLOAD_8(v0, 0xAA, 0xAA);
asm volatile("vadc.vvm v12, v4, v8, v0");
VCMP_U32(3, v12, 9, 10, 9, 10, 9, 10, 9, 10, 2, 5, 6, 9, 10, 13, 0, 0);

VSET(16, e64, m8);
VLOAD_64(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, -7, 7);
VLOAD_64(v16, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, -8);
VLOAD_8(v0, 0xAA, 0xAA);
asm volatile("vadc.vvm v24, v8, v16, v0");
VCMP_U64(4, v24, 9, 10, 9, 10, 9, 10, 9, 10, 2, 5, 6, 9, 10, 13, 0, 0);
}

void TEST_CASE2(void) {
const uint32_t scalar = 5;

VSET(16, e8, m1);
VLOAD_8(v1, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
VLOAD_8(v0, 0xAA, 0xAA);
asm volatile("vadc.vxm v3, v1, %[A], v0" ::[A] "r"(scalar));
VCMP_U8(5, v3, 6, 8, 8, 10, 10, 12, 12, 14, 6, 8, 8, 10, 10, 12, 12, 14);

VSET(16, e16, m2);
VLOAD_16(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
VLOAD_8(v0, 0xAA, 0xAA);
asm volatile("vadc.vxm v4, v2, %[A], v0" ::[A] "r"(scalar));
VCMP_U16(6, v4, 6, 8, 8, 10, 10, 12, 12, 14, 6, 8, 8, 10, 10, 12, 12, 14);

VSET(16, e32, m4);
VLOAD_32(v4, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
VLOAD_8(v0, 0xAA, 0xAA);
asm volatile("vadc.vxm v8, v4, %[A], v0" ::[A] "r"(scalar));
VCMP_U32(7, v8, 6, 8, 8, 10, 10, 12, 12, 14, 6, 8, 8, 10, 10, 12, 12, 14);

VSET(16, e64, m8);
VLOAD_64(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
VLOAD_8(v0, 0xAA, 0xAA);
asm volatile("vadc.vxm v16, v8, %[A], v0" ::[A] "r"(scalar));
VCMP_U64(8, v16, 6, 8, 8, 10, 10, 12, 12, 14, 6, 8, 8, 10, 10, 12, 12, 14);
}

void TEST_CASE3(void) {
VSET(16, e8, m1);
VLOAD_8(v1, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
VLOAD_8(v0, 0xAA, 0xAA);
asm volatile("vadc.vim v3, v1, 5, v0");
VCMP_U8(9, v3, 6, 8, 8, 10, 10, 12, 12, 14, 6, 8, 8, 10, 10, 12, 12, 14);

VSET(16, e16, m2);
VLOAD_16(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
VLOAD_8(v0, 0xAA, 0xAA);
asm volatile("vadc.vim v4, v2, 5, v0");
VCMP_U16(10, v4, 6, 8, 8, 10, 10, 12, 12, 14, 6, 8, 8, 10, 10, 12, 12, 14);

VSET(16, e32, m4);
VLOAD_32(v4, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
VLOAD_8(v0, 0xAA, 0xAA);
asm volatile("vadc.vim v8, v4, 5, v0");
VCMP_U32(11, v8, 6, 8, 8, 10, 10, 12, 12, 14, 6, 8, 8, 10, 10, 12, 12, 14);

VSET(16, e64, m8);
VLOAD_64(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
VLOAD_8(v0, 0xAA, 0xAA);
asm volatile("vadc.vim v16, v8, 5, v0");
VCMP_U64(12, v16, 6, 8, 8, 10, 10, 12, 12, 14, 6, 8, 8, 10, 10, 12, 12, 14);
}

int main(void) {
INIT_CHECK();
enable_vec();

TEST_CASE1();
TEST_CASE2();
TEST_CASE3();

EXIT_CHECK();
}
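The mask pattern explains the expected values here: vadc always consumes v0 as a per-element carry-in (vd[i] = vs2[i] + vs1[i] + v0.mask[i]), and 0xAA sets the carry for odd-indexed elements. A minimal scalar sketch, with vadc_ref as an illustrative name:

#include <stdint.h>

// Scalar model of vadc.vvm: add the carry bit taken from mask register v0.
static void vadc_ref(int8_t *vd, const int8_t *vs2, const int8_t *vs1,
                     const uint8_t *v0, int vl) {
  for (int i = 0; i < vl; ++i) {
    int carry = (v0[i / 8] >> (i % 8)) & 1;  // mask bit i is the carry-in
    vd[i] = (int8_t)(vs2[i] + vs1[i] + carry);
  }
}

This reproduces TEST_CASE1: element 15 computes 7 + (-8) + 1 = 0.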